From patchwork Tue Nov 3 10:07:42 2020
X-Patchwork-Submitter: Jiawen Wu
X-Patchwork-Id: 83520
X-Patchwork-Delegate: ferruh.yigit@amd.com
From: Jiawen Wu
To: dev@dpdk.org
Cc: Jiawen Wu
Date: Tue, 3 Nov 2020 18:07:42 +0800
Message-Id: <20201103100818.311881-2-jiawenwu@trustnetic.com>
In-Reply-To: <20201103100818.311881-1-jiawenwu@trustnetic.com>
References: <20201103100818.311881-1-jiawenwu@trustnetic.com>
Subject: [dpdk-dev] [PATCH 01/37] net/txgbe: add ntuple filter init and uninit

Add ntuple filter init and uninit.
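As a side note for readers of the diff below, here is a minimal standalone sketch (illustration only, not part of the patch; the helper name is made up) of how the fivetuple_mask bitmap introduced in this patch is sized and indexed. Since TXGBE_MAX_FTQF_FILTERS (128) is a multiple of 32, the RTE_ALIGN in the real TXGBE_5TUPLE_ARRAY_SIZE macro is a no-op and the array is four uint32_t words; filter index i lives at bit (i % 32) of word (i / 32).

#include <stdint.h>

#define NBBY 8                      /* bits per byte, as used by the driver */
#define TXGBE_MAX_FTQF_FILTERS 128
#define TXGBE_5TUPLE_ARRAY_SIZE \
	(TXGBE_MAX_FTQF_FILTERS / (sizeof(uint32_t) * NBBY))

/* hypothetical helper: is slot i of the 5-tuple filter bitmap still free? */
static int
fivetuple_slot_is_free(const uint32_t mask[], int i)
{
	int idx = i / (sizeof(uint32_t) * NBBY);   /* word holding slot i */
	int shift = i % (sizeof(uint32_t) * NBBY); /* bit within that word */

	return !(mask[idx] & (1u << shift));
}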
Signed-off-by: Jiawen Wu --- drivers/net/txgbe/base/txgbe_type.h | 1 + drivers/net/txgbe/txgbe_ethdev.c | 28 ++++++++++++++++++++++++ drivers/net/txgbe/txgbe_ethdev.h | 33 +++++++++++++++++++++++++++++ 3 files changed, 62 insertions(+) diff --git a/drivers/net/txgbe/base/txgbe_type.h b/drivers/net/txgbe/base/txgbe_type.h index b322a2cac..69aa8993a 100644 --- a/drivers/net/txgbe/base/txgbe_type.h +++ b/drivers/net/txgbe/base/txgbe_type.h @@ -15,6 +15,7 @@ #define TXGBE_FRAME_SIZE_DFT (1518) /* Default frame size, +FCS */ #define TXGBE_NUM_POOL (64) #define TXGBE_PBTXSIZE_MAX 0x00028000 /* 160KB Packet Buffer */ +#define TXGBE_MAX_FTQF_FILTERS 128 #define TXGBE_TXPKT_SIZE_MAX 0xA /* Max Tx Packet size */ #define TXGBE_MAX_UP 8 #define TXGBE_MAX_QP (128) diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c index 189caf2e9..08f19a2ef 100644 --- a/drivers/net/txgbe/txgbe_ethdev.c +++ b/drivers/net/txgbe/txgbe_ethdev.c @@ -470,6 +470,7 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(eth_dev); struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(eth_dev); struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(eth_dev); + struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(eth_dev); struct txgbe_bw_conf *bw_conf = TXGBE_DEV_BW_CONF(eth_dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; const struct rte_memzone *mz; @@ -677,6 +678,13 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) /* enable support intr */ txgbe_enable_intr(eth_dev); + /* initialize filter info */ + memset(filter_info, 0, + sizeof(struct txgbe_filter_info)); + + /* initialize 5tuple filter list */ + TAILQ_INIT(&filter_info->fivetuple_list); + /* initialize bandwidth configuration info */ memset(bw_conf, 0, sizeof(struct txgbe_bw_conf)); @@ -696,6 +704,23 @@ eth_txgbe_dev_uninit(struct rte_eth_dev *eth_dev) return 0; } +static int txgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev) +{ + struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(eth_dev); + struct txgbe_5tuple_filter *p_5tuple; + + while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) { + TAILQ_REMOVE(&filter_info->fivetuple_list, + p_5tuple, + entries); + rte_free(p_5tuple); + } + memset(filter_info->fivetuple_mask, 0, + sizeof(uint32_t) * TXGBE_5TUPLE_ARRAY_SIZE); + + return 0; +} + static int eth_txgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, struct rte_pci_device *pci_dev) @@ -1774,6 +1799,9 @@ txgbe_dev_close(struct rte_eth_dev *dev) rte_free(dev->data->hash_mac_addrs); dev->data->hash_mac_addrs = NULL; + /* Remove all ntuple filters of the device */ + txgbe_ntuple_filter_uninit(dev); + return ret; } diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h index 2c3680218..60687bc49 100644 --- a/drivers/net/txgbe/txgbe_ethdev.h +++ b/drivers/net/txgbe/txgbe_ethdev.h @@ -110,6 +110,36 @@ struct txgbe_vf_info { uint16_t mac_count; }; +TAILQ_HEAD(txgbe_5tuple_filter_list, txgbe_5tuple_filter); + +struct txgbe_5tuple_filter_info { + uint32_t dst_ip; + uint32_t src_ip; + uint16_t dst_port; + uint16_t src_port; + enum txgbe_5tuple_protocol proto; /* l4 protocol. */ + uint8_t priority; /* seven levels (001b-111b), 111b is highest, + * used when more than one filter matches. + */ + uint8_t dst_ip_mask:1, /* if mask is 1b, do not compare dst ip. */ + src_ip_mask:1, /* if mask is 1b, do not compare src ip. 
*/ + dst_port_mask:1, /* if mask is 1b, do not compare dst port. */ + src_port_mask:1, /* if mask is 1b, do not compare src port. */ + proto_mask:1; /* if mask is 1b, do not compare protocol. */ +}; + +/* 5tuple filter structure */ +struct txgbe_5tuple_filter { + TAILQ_ENTRY(txgbe_5tuple_filter) entries; + uint16_t index; /* the index of 5tuple filter */ + struct txgbe_5tuple_filter_info filter_info; + uint16_t queue; /* rx queue assigned to */ +}; + +#define TXGBE_5TUPLE_ARRAY_SIZE \ + (RTE_ALIGN(TXGBE_MAX_FTQF_FILTERS, (sizeof(uint32_t) * NBBY)) / \ + (sizeof(uint32_t) * NBBY)) + struct txgbe_ethertype_filter { uint16_t ethertype; uint32_t etqf; @@ -128,6 +158,9 @@ struct txgbe_filter_info { uint8_t ethertype_mask; /* Bit mask for every used ethertype filter */ /* store used ethertype filters*/ struct txgbe_ethertype_filter ethertype_filters[TXGBE_ETF_ID_MAX]; + /* Bit mask for every used 5tuple filter */ + uint32_t fivetuple_mask[TXGBE_5TUPLE_ARRAY_SIZE]; + struct txgbe_5tuple_filter_list fivetuple_list; }; /* The configuration of bandwidth */ From patchwork Tue Nov 3 10:07:43 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiawen Wu X-Patchwork-Id: 83522 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id A982DA0521; Tue, 3 Nov 2020 11:07:46 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 917A9C8C6; Tue, 3 Nov 2020 11:07:04 +0100 (CET) Received: from smtpproxy21.qq.com (smtpbg704.qq.com [203.205.195.105]) by dpdk.org (Postfix) with ESMTP id CA189C82C for ; Tue, 3 Nov 2020 11:06:59 +0100 (CET) X-QQ-mid: bizesmtp26t1604398014t7zi9dil Received: from localhost.localdomain.com (unknown [183.129.236.74]) by esmtp10.qq.com (ESMTP) with id ; Tue, 03 Nov 2020 18:06:53 +0800 (CST) X-QQ-SSF: 01400000002000C0C000B00A0000000 X-QQ-FEAT: 1G/RtVmKuRXVKgOIf5xavYgOgAZ1uXL+6ZeyzTHOkQo64dAEnNW4xVaTiKEqT Jg6XfWE3xOzoA7NEPrsWdSs79Nq2CppfR5UW1q5khoOLuZ6Grypa5WT5jqxoSZ2PFb+RcU+ CgRBFu6fJgsap3cSjBssRyBA67ZvvEaPBYh/bn6m0FGcLsBE1cjil9N0AlzRqtyJbO9nHM9 4ldOFUuoQ47MnEk3Nj/NcJfIc77l8jR5MLfuYCweeBVjbWukSEVYIZNxUIIsAN1QDujsMN5 /8+vPn4/GB+vKZTMOXkhNcD1K1Rf4/G6Do5FgUhO39R6GjNWmnHej2kCb0fDi5Vq6oqQl/Q Gtmhi53BNHlM5PFPbA= X-QQ-GoodBg: 2 From: Jiawen Wu To: dev@dpdk.org Cc: Jiawen Wu Date: Tue, 3 Nov 2020 18:07:43 +0800 Message-Id: <20201103100818.311881-3-jiawenwu@trustnetic.com> X-Mailer: git-send-email 2.18.4 In-Reply-To: <20201103100818.311881-1-jiawenwu@trustnetic.com> References: <20201103100818.311881-1-jiawenwu@trustnetic.com> X-QQ-SENDSIZE: 520 Feedback-ID: bizesmtp:trustnetic.com:qybgforeign:qybgforeign6 X-QQ-Bgrelay: 1 Subject: [dpdk-dev] [PATCH 02/37] net/txgbe: support ntuple filter add and delete X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Support add and delete operations on ntuple filter. 
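For illustration before the diff: a hedged sketch (not part of the patch; the wrapper function and its call site are hypothetical) of how a caller could fill struct rte_eth_ntuple_filter so that the new txgbe_add_del_ntuple_filter()/ntuple_filter_to_5tuple() path accepts it. A field mask of all ones means "compare this field", a mask of 0 means "ignore it"; priority must be within 1..7 and flags must be RTE_5TUPLE_FLAGS.

#include <stdbool.h>
#include <stdint.h>
#include <netinet/in.h>
#include <rte_byteorder.h>
#include <rte_ip.h>
#include <rte_ethdev.h>
#include "txgbe_ethdev.h"

/* hypothetical wrapper: steer TCP packets to 192.168.1.20:80 into rx_queue */
static int
txgbe_example_add_tcp_5tuple(struct rte_eth_dev *dev, uint16_t rx_queue)
{
	struct rte_eth_ntuple_filter ntuple = {
		.flags = RTE_5TUPLE_FLAGS,
		.dst_ip = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 20)),
		.dst_ip_mask = UINT32_MAX,   /* compare dst IP */
		.src_ip_mask = 0,            /* ignore src IP */
		.dst_port = rte_cpu_to_be_16(80),
		.dst_port_mask = UINT16_MAX, /* compare dst port */
		.src_port_mask = 0,          /* ignore src port */
		.proto = IPPROTO_TCP,
		.proto_mask = UINT8_MAX,     /* compare protocol */
		.priority = 1,               /* valid range is 1..7 */
		.queue = rx_queue,
	};

	/* add == true installs the filter; add == false removes a matching one */
	return txgbe_add_del_ntuple_filter(dev, &ntuple, true);
}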
Signed-off-by: Jiawen Wu --- drivers/net/txgbe/txgbe_ethdev.c | 310 +++++++++++++++++++++++++++++++ drivers/net/txgbe/txgbe_ethdev.h | 6 + 2 files changed, 316 insertions(+) diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c index 08f19a2ef..fe32194a7 100644 --- a/drivers/net/txgbe/txgbe_ethdev.c +++ b/drivers/net/txgbe/txgbe_ethdev.c @@ -109,6 +109,8 @@ static void txgbe_dev_interrupt_handler(void *param); static void txgbe_dev_interrupt_delayed_handler(void *param); static void txgbe_configure_msix(struct rte_eth_dev *dev); +static int txgbe_filter_restore(struct rte_eth_dev *dev); + #define TXGBE_SET_HWSTRIP(h, q) do {\ uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \ uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \ @@ -1611,6 +1613,7 @@ txgbe_dev_start(struct rte_eth_dev *dev) /* resume enabled intr since hw reset */ txgbe_enable_intr(dev); + txgbe_filter_restore(dev); /* * Update link status right before return, because it may @@ -3693,6 +3696,293 @@ txgbe_set_queue_rate_limit(struct rte_eth_dev *dev, return 0; } +static inline enum txgbe_5tuple_protocol +convert_protocol_type(uint8_t protocol_value) +{ + if (protocol_value == IPPROTO_TCP) + return TXGBE_5TF_PROT_TCP; + else if (protocol_value == IPPROTO_UDP) + return TXGBE_5TF_PROT_UDP; + else if (protocol_value == IPPROTO_SCTP) + return TXGBE_5TF_PROT_SCTP; + else + return TXGBE_5TF_PROT_NONE; +} + +/* inject a 5-tuple filter to HW */ +static inline void +txgbe_inject_5tuple_filter(struct rte_eth_dev *dev, + struct txgbe_5tuple_filter *filter) +{ + struct txgbe_hw *hw = TXGBE_DEV_HW(dev); + int i; + uint32_t ftqf, sdpqf; + uint32_t l34timir = 0; + uint32_t mask = TXGBE_5TFCTL0_MASK; + + i = filter->index; + sdpqf = TXGBE_5TFPORT_DST(be_to_le16(filter->filter_info.dst_port)); + sdpqf |= TXGBE_5TFPORT_SRC(be_to_le16(filter->filter_info.src_port)); + + ftqf = TXGBE_5TFCTL0_PROTO(filter->filter_info.proto); + ftqf |= TXGBE_5TFCTL0_PRI(filter->filter_info.priority); + if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */ + mask &= ~TXGBE_5TFCTL0_MSADDR; + if (filter->filter_info.dst_ip_mask == 0) + mask &= ~TXGBE_5TFCTL0_MDADDR; + if (filter->filter_info.src_port_mask == 0) + mask &= ~TXGBE_5TFCTL0_MSPORT; + if (filter->filter_info.dst_port_mask == 0) + mask &= ~TXGBE_5TFCTL0_MDPORT; + if (filter->filter_info.proto_mask == 0) + mask &= ~TXGBE_5TFCTL0_MPROTO; + ftqf |= mask; + ftqf |= TXGBE_5TFCTL0_MPOOL; + ftqf |= TXGBE_5TFCTL0_ENA; + + wr32(hw, TXGBE_5TFDADDR(i), be_to_le32(filter->filter_info.dst_ip)); + wr32(hw, TXGBE_5TFSADDR(i), be_to_le32(filter->filter_info.src_ip)); + wr32(hw, TXGBE_5TFPORT(i), sdpqf); + wr32(hw, TXGBE_5TFCTL0(i), ftqf); + + l34timir |= TXGBE_5TFCTL1_QP(filter->queue); + wr32(hw, TXGBE_5TFCTL1(i), l34timir); +} + +/* + * add a 5tuple filter + * + * @param + * dev: Pointer to struct rte_eth_dev. + * index: the index the filter allocates. + * filter: ponter to the filter that will be added. + * rx_queue: the queue id the filter assigned to. + * + * @return + * - On success, zero. + * - On failure, a negative value. + */ +static int +txgbe_add_5tuple_filter(struct rte_eth_dev *dev, + struct txgbe_5tuple_filter *filter) +{ + struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev); + int i, idx, shift; + + /* + * look for an unused 5tuple filter index, + * and insert the filter to list. 
+ */ + for (i = 0; i < TXGBE_MAX_FTQF_FILTERS; i++) { + idx = i / (sizeof(uint32_t) * NBBY); + shift = i % (sizeof(uint32_t) * NBBY); + if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) { + filter_info->fivetuple_mask[idx] |= 1 << shift; + filter->index = i; + TAILQ_INSERT_TAIL(&filter_info->fivetuple_list, + filter, + entries); + break; + } + } + if (i >= TXGBE_MAX_FTQF_FILTERS) { + PMD_DRV_LOG(ERR, "5tuple filters are full."); + return -ENOSYS; + } + + txgbe_inject_5tuple_filter(dev, filter); + + return 0; +} + +/* + * remove a 5tuple filter + * + * @param + * dev: Pointer to struct rte_eth_dev. + * filter: the pointer of the filter will be removed. + */ +static void +txgbe_remove_5tuple_filter(struct rte_eth_dev *dev, + struct txgbe_5tuple_filter *filter) +{ + struct txgbe_hw *hw = TXGBE_DEV_HW(dev); + struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev); + uint16_t index = filter->index; + + filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &= + ~(1 << (index % (sizeof(uint32_t) * NBBY))); + TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries); + rte_free(filter); + + wr32(hw, TXGBE_5TFDADDR(index), 0); + wr32(hw, TXGBE_5TFSADDR(index), 0); + wr32(hw, TXGBE_5TFPORT(index), 0); + wr32(hw, TXGBE_5TFCTL0(index), 0); + wr32(hw, TXGBE_5TFCTL1(index), 0); +} + +static inline struct txgbe_5tuple_filter * +txgbe_5tuple_filter_lookup(struct txgbe_5tuple_filter_list *filter_list, + struct txgbe_5tuple_filter_info *key) +{ + struct txgbe_5tuple_filter *it; + + TAILQ_FOREACH(it, filter_list, entries) { + if (memcmp(key, &it->filter_info, + sizeof(struct txgbe_5tuple_filter_info)) == 0) { + return it; + } + } + return NULL; +} + +/* translate elements in struct rte_eth_ntuple_filter + * to struct txgbe_5tuple_filter_info + */ +static inline int +ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter, + struct txgbe_5tuple_filter_info *filter_info) +{ + if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM || + filter->priority > TXGBE_5TUPLE_MAX_PRI || + filter->priority < TXGBE_5TUPLE_MIN_PRI) + return -EINVAL; + + switch (filter->dst_ip_mask) { + case UINT32_MAX: + filter_info->dst_ip_mask = 0; + filter_info->dst_ip = filter->dst_ip; + break; + case 0: + filter_info->dst_ip_mask = 1; + break; + default: + PMD_DRV_LOG(ERR, "invalid dst_ip mask."); + return -EINVAL; + } + + switch (filter->src_ip_mask) { + case UINT32_MAX: + filter_info->src_ip_mask = 0; + filter_info->src_ip = filter->src_ip; + break; + case 0: + filter_info->src_ip_mask = 1; + break; + default: + PMD_DRV_LOG(ERR, "invalid src_ip mask."); + return -EINVAL; + } + + switch (filter->dst_port_mask) { + case UINT16_MAX: + filter_info->dst_port_mask = 0; + filter_info->dst_port = filter->dst_port; + break; + case 0: + filter_info->dst_port_mask = 1; + break; + default: + PMD_DRV_LOG(ERR, "invalid dst_port mask."); + return -EINVAL; + } + + switch (filter->src_port_mask) { + case UINT16_MAX: + filter_info->src_port_mask = 0; + filter_info->src_port = filter->src_port; + break; + case 0: + filter_info->src_port_mask = 1; + break; + default: + PMD_DRV_LOG(ERR, "invalid src_port mask."); + return -EINVAL; + } + + switch (filter->proto_mask) { + case UINT8_MAX: + filter_info->proto_mask = 0; + filter_info->proto = + convert_protocol_type(filter->proto); + break; + case 0: + filter_info->proto_mask = 1; + break; + default: + PMD_DRV_LOG(ERR, "invalid protocol mask."); + return -EINVAL; + } + + filter_info->priority = (uint8_t)filter->priority; + return 0; +} + +/* + * add or delete a ntuple filter + * + * @param 
+ * dev: Pointer to struct rte_eth_dev. + * ntuple_filter: Pointer to struct rte_eth_ntuple_filter + * add: if true, add filter, if false, remove filter + * + * @return + * - On success, zero. + * - On failure, a negative value. + */ +int +txgbe_add_del_ntuple_filter(struct rte_eth_dev *dev, + struct rte_eth_ntuple_filter *ntuple_filter, + bool add) +{ + struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev); + struct txgbe_5tuple_filter_info filter_5tuple; + struct txgbe_5tuple_filter *filter; + int ret; + + if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) { + PMD_DRV_LOG(ERR, "only 5tuple is supported."); + return -EINVAL; + } + + memset(&filter_5tuple, 0, sizeof(struct txgbe_5tuple_filter_info)); + ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple); + if (ret < 0) + return ret; + + filter = txgbe_5tuple_filter_lookup(&filter_info->fivetuple_list, + &filter_5tuple); + if (filter != NULL && add) { + PMD_DRV_LOG(ERR, "filter exists."); + return -EEXIST; + } + if (filter == NULL && !add) { + PMD_DRV_LOG(ERR, "filter doesn't exist."); + return -ENOENT; + } + + if (add) { + filter = rte_zmalloc("txgbe_5tuple_filter", + sizeof(struct txgbe_5tuple_filter), 0); + if (filter == NULL) + return -ENOMEM; + rte_memcpy(&filter->filter_info, + &filter_5tuple, + sizeof(struct txgbe_5tuple_filter_info)); + filter->queue = ntuple_filter->queue; + ret = txgbe_add_5tuple_filter(dev, filter); + if (ret < 0) { + rte_free(filter); + return ret; + } + } else { + txgbe_remove_5tuple_filter(dev, filter); + } + + return 0; +} + static u8 * txgbe_dev_addr_list_itr(__rte_unused struct txgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq) @@ -4214,6 +4504,26 @@ txgbe_dev_get_dcb_info(struct rte_eth_dev *dev, return 0; } +/* restore n-tuple filter */ +static inline void +txgbe_ntuple_filter_restore(struct rte_eth_dev *dev) +{ + struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev); + struct txgbe_5tuple_filter *node; + + TAILQ_FOREACH(node, &filter_info->fivetuple_list, entries) { + txgbe_inject_5tuple_filter(dev, node); + } +} + +static int +txgbe_filter_restore(struct rte_eth_dev *dev) +{ + txgbe_ntuple_filter_restore(dev); + + return 0; +} + static const struct eth_dev_ops txgbe_eth_dev_ops = { .dev_configure = txgbe_dev_configure, .dev_infos_get = txgbe_dev_info_get, diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h index 60687bc49..caccf342d 100644 --- a/drivers/net/txgbe/txgbe_ethdev.h +++ b/drivers/net/txgbe/txgbe_ethdev.h @@ -39,6 +39,9 @@ #define TXGBE_MAX_QUEUE_NUM_PER_VF 8 +#define TXGBE_5TUPLE_MAX_PRI 7 +#define TXGBE_5TUPLE_MIN_PRI 1 + #define TXGBE_RSS_OFFLOAD_ALL ( \ ETH_RSS_IPV4 | \ ETH_RSS_NONFRAG_IPV4_TCP | \ @@ -311,6 +314,9 @@ int txgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev, bool txgbe_rss_update_sp(enum txgbe_mac_type mac_type); +int txgbe_add_del_ntuple_filter(struct rte_eth_dev *dev, + struct rte_eth_ntuple_filter *filter, + bool add); void txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction, uint8_t queue, uint8_t msix_vector); From patchwork Tue Nov 3 10:07:44 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiawen Wu X-Patchwork-Id: 83525 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id DA40AA0521; Tue, 3 Nov 2020 11:08:50 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by 
dpdk.org (Postfix) with ESMTP id 03FB9C8F8; Tue, 3 Nov 2020 11:07:09 +0100 (CET) Received: from smtpbgbr2.qq.com (smtpbgbr2.qq.com [54.207.22.56]) by dpdk.org (Postfix) with ESMTP id 2E255C8C4 for ; Tue, 3 Nov 2020 11:07:02 +0100 (CET) X-QQ-mid: bizesmtp26t1604398015t4ipq1ih Received: from localhost.localdomain.com (unknown [183.129.236.74]) by esmtp10.qq.com (ESMTP) with id ; Tue, 03 Nov 2020 18:06:55 +0800 (CST) X-QQ-SSF: 01400000002000C0C000B00A0000000 X-QQ-FEAT: pDrqMl8+oMF8KfeT5PT7pQy5aTee8HYWB3jJVYB10hQVidPGuLgPf+OqESZTc MY0nuO3Pm+3hauk9CN920rJ8GA7W0OOJJ47VWH74i9jy9Z7CcyGh9XCeQInp1XiIofk/G5u Y6P2NE1QN3nhjVA+cX2J1Kq5jeEZb0wt9tfA5q45FULwOAwnUYSww23bnoRAFuuUhEYy0lf 22NclW/TORNhEC7qK/XHU0TeySjbU6D6rjaItmS0HoMmc78PxdKHQ1pxoakjwnyob7WhzgB tfzQZLpW1j+83kmBVh2JWeBD5jDVJG/rIwZni9T135KjNFGkEoaPt/tTWk0azjFxvbOl0Ki xCespuNgjV0fsg8bnRSTyY7vFBQkJAPdnoih8GF X-QQ-GoodBg: 2 From: Jiawen Wu To: dev@dpdk.org Cc: Jiawen Wu Date: Tue, 3 Nov 2020 18:07:44 +0800 Message-Id: <20201103100818.311881-4-jiawenwu@trustnetic.com> X-Mailer: git-send-email 2.18.4 In-Reply-To: <20201103100818.311881-1-jiawenwu@trustnetic.com> References: <20201103100818.311881-1-jiawenwu@trustnetic.com> X-QQ-SENDSIZE: 520 Feedback-ID: bizesmtp:trustnetic.com:qybgforeign:qybgforeign7 X-QQ-Bgrelay: 1 Subject: [dpdk-dev] [PATCH 03/37] net/txgbe: add ntuple parse rule X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Add support to parse flow for ntuple filter. Signed-off-by: Jiawen Wu --- drivers/net/txgbe/meson.build | 1 + drivers/net/txgbe/txgbe_flow.c | 536 +++++++++++++++++++++++++++++++++ 2 files changed, 537 insertions(+) create mode 100644 drivers/net/txgbe/txgbe_flow.c diff --git a/drivers/net/txgbe/meson.build b/drivers/net/txgbe/meson.build index 345dffaf6..45379175d 100644 --- a/drivers/net/txgbe/meson.build +++ b/drivers/net/txgbe/meson.build @@ -6,6 +6,7 @@ objs = [base_objs] sources = files( 'txgbe_ethdev.c', + 'txgbe_flow.c', 'txgbe_ptypes.c', 'txgbe_pf.c', 'txgbe_rxtx.c', diff --git a/drivers/net/txgbe/txgbe_flow.c b/drivers/net/txgbe/txgbe_flow.c new file mode 100644 index 000000000..6f8be3b7f --- /dev/null +++ b/drivers/net/txgbe/txgbe_flow.c @@ -0,0 +1,536 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2015-2020 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "txgbe_logs.h" +#include "base/txgbe.h" +#include "txgbe_ethdev.h" +#include "txgbe_rxtx.h" + +#define TXGBE_MIN_N_TUPLE_PRIO 1 +#define TXGBE_MAX_N_TUPLE_PRIO 7 + +/** + * Endless loop will never happen with below assumption + * 1. there is at least one no-void item(END) + * 2. cur is before END. + */ +static inline +const struct rte_flow_item *next_no_void_pattern( + const struct rte_flow_item pattern[], + const struct rte_flow_item *cur) +{ + const struct rte_flow_item *next = + cur ? cur + 1 : &pattern[0]; + while (1) { + if (next->type != RTE_FLOW_ITEM_TYPE_VOID) + return next; + next++; + } +} + +static inline +const struct rte_flow_action *next_no_void_action( + const struct rte_flow_action actions[], + const struct rte_flow_action *cur) +{ + const struct rte_flow_action *next = + cur ? 
cur + 1 : &actions[0]; + while (1) { + if (next->type != RTE_FLOW_ACTION_TYPE_VOID) + return next; + next++; + } +} + +/** + * Please aware there's an asumption for all the parsers. + * rte_flow_item is using big endian, rte_flow_attr and + * rte_flow_action are using CPU order. + * Because the pattern is used to describe the packets, + * normally the packets should use network order. + */ + +/** + * Parse the rule to see if it is a n-tuple rule. + * And get the n-tuple filter info BTW. + * pattern: + * The first not void item can be ETH or IPV4. + * The second not void item must be IPV4 if the first one is ETH. + * The third not void item must be UDP or TCP. + * The next not void item must be END. + * action: + * The first not void action should be QUEUE. + * The next not void action should be END. + * pattern example: + * ITEM Spec Mask + * ETH NULL NULL + * IPV4 src_addr 192.168.1.20 0xFFFFFFFF + * dst_addr 192.167.3.50 0xFFFFFFFF + * next_proto_id 17 0xFF + * UDP/TCP/ src_port 80 0xFFFF + * SCTP dst_port 80 0xFFFF + * END + * other members in mask and spec should set to 0x00. + * item->last should be NULL. + */ +static int +cons_parse_ntuple_filter(const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_eth_ntuple_filter *filter, + struct rte_flow_error *error) +{ + const struct rte_flow_item *item; + const struct rte_flow_action *act; + const struct rte_flow_item_ipv4 *ipv4_spec; + const struct rte_flow_item_ipv4 *ipv4_mask; + const struct rte_flow_item_tcp *tcp_spec; + const struct rte_flow_item_tcp *tcp_mask; + const struct rte_flow_item_udp *udp_spec; + const struct rte_flow_item_udp *udp_mask; + const struct rte_flow_item_sctp *sctp_spec; + const struct rte_flow_item_sctp *sctp_mask; + const struct rte_flow_item_eth *eth_spec; + const struct rte_flow_item_eth *eth_mask; + const struct rte_flow_item_vlan *vlan_spec; + const struct rte_flow_item_vlan *vlan_mask; + struct rte_flow_item_eth eth_null; + struct rte_flow_item_vlan vlan_null; + + if (!pattern) { + rte_flow_error_set(error, + EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM, + NULL, "NULL pattern."); + return -rte_errno; + } + + if (!actions) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_NUM, + NULL, "NULL action."); + return -rte_errno; + } + if (!attr) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR, + NULL, "NULL attribute."); + return -rte_errno; + } + + memset(ð_null, 0, sizeof(struct rte_flow_item_eth)); + memset(&vlan_null, 0, sizeof(struct rte_flow_item_vlan)); + + /* the first not void item can be MAC or IPv4 */ + item = next_no_void_pattern(pattern, NULL); + + if (item->type != RTE_FLOW_ITEM_TYPE_ETH && + item->type != RTE_FLOW_ITEM_TYPE_IPV4) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } + /* Skip Ethernet */ + if (item->type == RTE_FLOW_ITEM_TYPE_ETH) { + eth_spec = item->spec; + eth_mask = item->mask; + /*Not supported last point for range*/ + if (item->last) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + /* if the first item is MAC, the content should be NULL */ + if ((item->spec || item->mask) && + (memcmp(eth_spec, ð_null, + sizeof(struct rte_flow_item_eth)) || + memcmp(eth_mask, ð_null, + sizeof(struct rte_flow_item_eth)))) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple 
filter"); + return -rte_errno; + } + /* check if the next not void item is IPv4 or Vlan */ + item = next_no_void_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 && + item->type != RTE_FLOW_ITEM_TYPE_VLAN) { + rte_flow_error_set(error, + EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } + } + + if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) { + vlan_spec = item->spec; + vlan_mask = item->mask; + /*Not supported last point for range*/ + if (item->last) { + rte_flow_error_set(error, + EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + /* the content should be NULL */ + if ((item->spec || item->mask) && + (memcmp(vlan_spec, &vlan_null, + sizeof(struct rte_flow_item_vlan)) || + memcmp(vlan_mask, &vlan_null, + sizeof(struct rte_flow_item_vlan)))) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } + /* check if the next not void item is IPv4 */ + item = next_no_void_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) { + rte_flow_error_set(error, + EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } + } + + if (item->mask) { + /* get the IPv4 info */ + if (!item->spec || !item->mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Invalid ntuple mask"); + return -rte_errno; + } + /*Not supported last point for range*/ + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + + ipv4_mask = item->mask; + /** + * Only support src & dst addresses, protocol, + * others should be masked. 
+ */ + if (ipv4_mask->hdr.version_ihl || + ipv4_mask->hdr.type_of_service || + ipv4_mask->hdr.total_length || + ipv4_mask->hdr.packet_id || + ipv4_mask->hdr.fragment_offset || + ipv4_mask->hdr.time_to_live || + ipv4_mask->hdr.hdr_checksum) { + rte_flow_error_set(error, + EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } + if ((ipv4_mask->hdr.src_addr != 0 && + ipv4_mask->hdr.src_addr != UINT32_MAX) || + (ipv4_mask->hdr.dst_addr != 0 && + ipv4_mask->hdr.dst_addr != UINT32_MAX) || + (ipv4_mask->hdr.next_proto_id != UINT8_MAX && + ipv4_mask->hdr.next_proto_id != 0)) { + rte_flow_error_set(error, + EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } + + filter->dst_ip_mask = ipv4_mask->hdr.dst_addr; + filter->src_ip_mask = ipv4_mask->hdr.src_addr; + filter->proto_mask = ipv4_mask->hdr.next_proto_id; + + ipv4_spec = item->spec; + filter->dst_ip = ipv4_spec->hdr.dst_addr; + filter->src_ip = ipv4_spec->hdr.src_addr; + filter->proto = ipv4_spec->hdr.next_proto_id; + } + + /* check if the next not void item is TCP or UDP */ + item = next_no_void_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_TCP && + item->type != RTE_FLOW_ITEM_TYPE_UDP && + item->type != RTE_FLOW_ITEM_TYPE_SCTP && + item->type != RTE_FLOW_ITEM_TYPE_END) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } + + if (item->type != RTE_FLOW_ITEM_TYPE_END && + (!item->spec && !item->mask)) { + goto action; + } + + /* get the TCP/UDP/SCTP info */ + if (item->type != RTE_FLOW_ITEM_TYPE_END && + (!item->spec || !item->mask)) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Invalid ntuple mask"); + return -rte_errno; + } + + /*Not supported last point for range*/ + if (item->last) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + + if (item->type == RTE_FLOW_ITEM_TYPE_TCP) { + tcp_mask = item->mask; + + /** + * Only support src & dst ports, tcp flags, + * others should be masked. 
+ */ + if (tcp_mask->hdr.sent_seq || + tcp_mask->hdr.recv_ack || + tcp_mask->hdr.data_off || + tcp_mask->hdr.rx_win || + tcp_mask->hdr.cksum || + tcp_mask->hdr.tcp_urp) { + memset(filter, 0, + sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } + if ((tcp_mask->hdr.src_port != 0 && + tcp_mask->hdr.src_port != UINT16_MAX) || + (tcp_mask->hdr.dst_port != 0 && + tcp_mask->hdr.dst_port != UINT16_MAX)) { + rte_flow_error_set(error, + EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } + + filter->dst_port_mask = tcp_mask->hdr.dst_port; + filter->src_port_mask = tcp_mask->hdr.src_port; + if (tcp_mask->hdr.tcp_flags == 0xFF) { + filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG; + } else if (!tcp_mask->hdr.tcp_flags) { + filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG; + } else { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } + + tcp_spec = item->spec; + filter->dst_port = tcp_spec->hdr.dst_port; + filter->src_port = tcp_spec->hdr.src_port; + filter->tcp_flags = tcp_spec->hdr.tcp_flags; + } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) { + udp_mask = item->mask; + + /** + * Only support src & dst ports, + * others should be masked. + */ + if (udp_mask->hdr.dgram_len || + udp_mask->hdr.dgram_cksum) { + memset(filter, 0, + sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } + if ((udp_mask->hdr.src_port != 0 && + udp_mask->hdr.src_port != UINT16_MAX) || + (udp_mask->hdr.dst_port != 0 && + udp_mask->hdr.dst_port != UINT16_MAX)) { + rte_flow_error_set(error, + EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } + + filter->dst_port_mask = udp_mask->hdr.dst_port; + filter->src_port_mask = udp_mask->hdr.src_port; + + udp_spec = item->spec; + filter->dst_port = udp_spec->hdr.dst_port; + filter->src_port = udp_spec->hdr.src_port; + } else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) { + sctp_mask = item->mask; + + /** + * Only support src & dst ports, + * others should be masked. + */ + if (sctp_mask->hdr.tag || + sctp_mask->hdr.cksum) { + memset(filter, 0, + sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } + + filter->dst_port_mask = sctp_mask->hdr.dst_port; + filter->src_port_mask = sctp_mask->hdr.src_port; + + sctp_spec = item->spec; + filter->dst_port = sctp_spec->hdr.dst_port; + filter->src_port = sctp_spec->hdr.src_port; + } else { + goto action; + } + + /* check if the next not void item is END */ + item = next_no_void_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_END) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } + +action: + + /** + * n-tuple only supports forwarding, + * check if the first not void action is QUEUE. 
+ */ + act = next_no_void_action(actions, NULL); + if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + item, "Not supported action."); + return -rte_errno; + } + filter->queue = + ((const struct rte_flow_action_queue *)act->conf)->index; + + /* check if the next not void item is END */ + act = next_no_void_action(actions, act); + if (act->type != RTE_FLOW_ACTION_TYPE_END) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } + + /* parse attr */ + /* must be input direction */ + if (!attr->ingress) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, + attr, "Only support ingress."); + return -rte_errno; + } + + /* not supported */ + if (attr->egress) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, + attr, "Not support egress."); + return -rte_errno; + } + + /* not supported */ + if (attr->transfer) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + attr, "No support for transfer."); + return -rte_errno; + } + + if (attr->priority > 0xFFFF) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, + attr, "Error priority."); + return -rte_errno; + } + filter->priority = (uint16_t)attr->priority; + if (attr->priority < TXGBE_MIN_N_TUPLE_PRIO || + attr->priority > TXGBE_MAX_N_TUPLE_PRIO) + filter->priority = 1; + + return 0; +} + +/* a specific function for txgbe because the flags is specific */ +static int +txgbe_parse_ntuple_filter(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_eth_ntuple_filter *filter, + struct rte_flow_error *error) +{ + int ret; + + ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error); + + if (ret) + return ret; + + /* txgbe doesn't support tcp flags */ + if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "Not supported by ntuple filter"); + return -rte_errno; + } + + /* txgbe doesn't support many priorities */ + if (filter->priority < TXGBE_MIN_N_TUPLE_PRIO || + filter->priority > TXGBE_MAX_N_TUPLE_PRIO) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "Priority not supported by ntuple filter"); + return -rte_errno; + } + + if (filter->queue >= dev->data->nb_rx_queues) + return -rte_errno; + + /* fixed value for txgbe */ + filter->flags = RTE_5TUPLE_FLAGS; + return 0; +} + From patchwork Tue Nov 3 10:07:45 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiawen Wu X-Patchwork-Id: 83524 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 7808FA0521; Tue, 3 Nov 2020 11:08:30 +0100 (CET) Received: from [92.243.14.124] (localhost 
[127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 993E1C8E2; Tue, 3 Nov 2020 11:07:07 +0100 (CET) Received: from smtpbgbr2.qq.com (smtpbgbr2.qq.com [54.207.22.56]) by dpdk.org (Postfix) with ESMTP id 2AAD1C8BC for ; Tue, 3 Nov 2020 11:07:02 +0100 (CET) X-QQ-mid: bizesmtp26t1604398017tlcj664r Received: from localhost.localdomain.com (unknown [183.129.236.74]) by esmtp10.qq.com (ESMTP) with id ; Tue, 03 Nov 2020 18:06:56 +0800 (CST) X-QQ-SSF: 01400000002000C0C000B00A0000000 X-QQ-FEAT: vXHWdJQTAMeLcdFmXhsUTaeqra+8UoniN/ZiCnQ3lGzXlZ5JtRfQk2MiWFacR 59/pePgCSdtxWivXottceGsGWng1sByHulQzdVEgabcHnLrQx1mZpx3oqJ6nGwFYRGNw2Fa 3Kx2B78LdNRHjvpe4NSEhe3QfvaDxBxKwMv/UB+tkXChySiiJslo1ijaTzb/seruebQGoCo g/laNk9YYC9W0ZxSq0MFOWr29UJgnUK36w/kUDMDX7i+R0BokZRJUSOVWlZkcXa6ylzYEdZ j+rLJX0xHZVQ5qECikcHB9ax8B9XM0iymxE/YSyzRuBIAeZh7AxTMxjbdTDaump/ij0rRuL H3uALai0aQ0THWhFuc= X-QQ-GoodBg: 2 From: Jiawen Wu To: dev@dpdk.org Cc: Jiawen Wu Date: Tue, 3 Nov 2020 18:07:45 +0800 Message-Id: <20201103100818.311881-5-jiawenwu@trustnetic.com> X-Mailer: git-send-email 2.18.4 In-Reply-To: <20201103100818.311881-1-jiawenwu@trustnetic.com> References: <20201103100818.311881-1-jiawenwu@trustnetic.com> X-QQ-SENDSIZE: 520 Feedback-ID: bizesmtp:trustnetic.com:qybgforeign:qybgforeign6 X-QQ-Bgrelay: 1 Subject: [dpdk-dev] [PATCH 04/37] net/txgbe: support ntuple filter remove operaion X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Support remove operation on ntuple filter. Signed-off-by: Jiawen Wu --- drivers/net/txgbe/txgbe_ethdev.c | 11 +++++++++++ drivers/net/txgbe/txgbe_ethdev.h | 2 ++ 2 files changed, 13 insertions(+) diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c index fe32194a7..1804802e5 100644 --- a/drivers/net/txgbe/txgbe_ethdev.c +++ b/drivers/net/txgbe/txgbe_ethdev.c @@ -4524,6 +4524,17 @@ txgbe_filter_restore(struct rte_eth_dev *dev) return 0; } +/* remove all the n-tuple filters */ +void +txgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev) +{ + struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev); + struct txgbe_5tuple_filter *p_5tuple; + + while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) + txgbe_remove_5tuple_filter(dev, p_5tuple); +} + static const struct eth_dev_ops txgbe_eth_dev_ops = { .dev_configure = txgbe_dev_configure, .dev_infos_get = txgbe_dev_info_get, diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h index caccf342d..c82f0c832 100644 --- a/drivers/net/txgbe/txgbe_ethdev.h +++ b/drivers/net/txgbe/txgbe_ethdev.h @@ -337,6 +337,8 @@ int txgbe_pf_host_configure(struct rte_eth_dev *eth_dev); uint32_t txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val); +void txgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev); + int txgbe_vt_check(struct txgbe_hw *hw); int txgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf, uint16_t tx_rate, uint64_t q_msk); From patchwork Tue Nov 3 10:07:46 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiawen Wu X-Patchwork-Id: 83523 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id AFE74A0521; Tue, 3 Nov 2020 11:08:08 +0100 (CET) Received: from 
[92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 17837C8D2; Tue, 3 Nov 2020 11:07:06 +0100 (CET) Received: from smtpbgeu1.qq.com (smtpbgeu1.qq.com [52.59.177.22]) by dpdk.org (Postfix) with ESMTP id B8DAAC8A6 for ; Tue, 3 Nov 2020 11:07:02 +0100 (CET) X-QQ-mid: bizesmtp26t1604398018tsxeui96 Received: from localhost.localdomain.com (unknown [183.129.236.74]) by esmtp10.qq.com (ESMTP) with id ; Tue, 03 Nov 2020 18:06:58 +0800 (CST) X-QQ-SSF: 01400000002000C0C000B00A0000000 X-QQ-FEAT: tHZj8EofyIUGsb1cc4XPfLnjd2YCxidt4JR6/d8VhyMjZh0XoTbHTS3umDAqH EGvSn69WoTd9M8YQY98Em+brADCdERulbzGuO4zPZKQiZOQhhA/vWcdZBs/W9Z4nTmJ+JDF H/uzQxW8tJp52N4AnJw72OMOpLAS2w17otDRwi/QbCqmxaKqqTDsbYjfuUDBHVK+QcSDTIQ +FcxHNyv47eG/3bCiGDfKdMeAqw30JYnbnLSE8lDmu/z05QMoFi5plDiPYTlxseLJlklTFm ZjI6x1OxDpTLxohtKaOeqLI0Eiubz9fG82jJ8CJ02QOf11hoMuDkU/uMctOQvSmJY5ySFwb Bzly6k+auNlCx8U1WVNgAtUGPWarQ== X-QQ-GoodBg: 2 From: Jiawen Wu To: dev@dpdk.org Cc: Jiawen Wu Date: Tue, 3 Nov 2020 18:07:46 +0800 Message-Id: <20201103100818.311881-6-jiawenwu@trustnetic.com> X-Mailer: git-send-email 2.18.4 In-Reply-To: <20201103100818.311881-1-jiawenwu@trustnetic.com> References: <20201103100818.311881-1-jiawenwu@trustnetic.com> X-QQ-SENDSIZE: 520 Feedback-ID: bizesmtp:trustnetic.com:qybgforeign:qybgforeign5 X-QQ-Bgrelay: 1 Subject: [dpdk-dev] [PATCH 05/37] net/txgbe: support ethertype filter add and delete X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Support add and delete operaions on ethertype filter. Signed-off-by: Jiawen Wu --- drivers/net/txgbe/txgbe_ethdev.c | 111 +++++++++++++++++++++++++++++++ drivers/net/txgbe/txgbe_ethdev.h | 19 ++++++ 2 files changed, 130 insertions(+) diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c index 1804802e5..b479c9152 100644 --- a/drivers/net/txgbe/txgbe_ethdev.c +++ b/drivers/net/txgbe/txgbe_ethdev.c @@ -3983,6 +3983,77 @@ txgbe_add_del_ntuple_filter(struct rte_eth_dev *dev, return 0; } +int +txgbe_add_del_ethertype_filter(struct rte_eth_dev *dev, + struct rte_eth_ethertype_filter *filter, + bool add) +{ + struct txgbe_hw *hw = TXGBE_DEV_HW(dev); + struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev); + uint32_t etqf = 0; + uint32_t etqs = 0; + int ret; + struct txgbe_ethertype_filter ethertype_filter; + + if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM) + return -EINVAL; + + if (filter->ether_type == RTE_ETHER_TYPE_IPV4 || + filter->ether_type == RTE_ETHER_TYPE_IPV6) { + PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in" + " ethertype filter.", filter->ether_type); + return -EINVAL; + } + + if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) { + PMD_DRV_LOG(ERR, "mac compare is unsupported."); + return -EINVAL; + } + if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) { + PMD_DRV_LOG(ERR, "drop option is unsupported."); + return -EINVAL; + } + + ret = txgbe_ethertype_filter_lookup(filter_info, filter->ether_type); + if (ret >= 0 && add) { + PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.", + filter->ether_type); + return -EEXIST; + } + if (ret < 0 && !add) { + PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.", + filter->ether_type); + return -ENOENT; + } + + if (add) { + etqf = TXGBE_ETFLT_ENA; + etqf |= TXGBE_ETFLT_ETID(filter->ether_type); + etqs |= TXGBE_ETCLS_QPID(filter->queue); + etqs |= TXGBE_ETCLS_QENA; + + ethertype_filter.ethertype = filter->ether_type; + 
ethertype_filter.etqf = etqf; + ethertype_filter.etqs = etqs; + ethertype_filter.conf = FALSE; + ret = txgbe_ethertype_filter_insert(filter_info, + ðertype_filter); + if (ret < 0) { + PMD_DRV_LOG(ERR, "ethertype filters are full."); + return -ENOSPC; + } + } else { + ret = txgbe_ethertype_filter_remove(filter_info, (uint8_t)ret); + if (ret < 0) + return -ENOSYS; + } + wr32(hw, TXGBE_ETFLT(ret), etqf); + wr32(hw, TXGBE_ETCLS(ret), etqs); + txgbe_flush(hw); + + return 0; +} + static u8 * txgbe_dev_addr_list_itr(__rte_unused struct txgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq) @@ -4516,10 +4587,30 @@ txgbe_ntuple_filter_restore(struct rte_eth_dev *dev) } } +/* restore ethernet type filter */ +static inline void +txgbe_ethertype_filter_restore(struct rte_eth_dev *dev) +{ + struct txgbe_hw *hw = TXGBE_DEV_HW(dev); + struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev); + int i; + + for (i = 0; i < TXGBE_ETF_ID_MAX; i++) { + if (filter_info->ethertype_mask & (1 << i)) { + wr32(hw, TXGBE_ETFLT(i), + filter_info->ethertype_filters[i].etqf); + wr32(hw, TXGBE_ETCLS(i), + filter_info->ethertype_filters[i].etqs); + txgbe_flush(hw); + } + } +} + static int txgbe_filter_restore(struct rte_eth_dev *dev) { txgbe_ntuple_filter_restore(dev); + txgbe_ethertype_filter_restore(dev); return 0; } @@ -4535,6 +4626,26 @@ txgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev) txgbe_remove_5tuple_filter(dev, p_5tuple); } +/* remove all the ether type filters */ +void +txgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev) +{ + struct txgbe_hw *hw = TXGBE_DEV_HW(dev); + struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev); + int i; + + for (i = 0; i < TXGBE_ETF_ID_MAX; i++) { + if (filter_info->ethertype_mask & (1 << i) && + !filter_info->ethertype_filters[i].conf) { + (void)txgbe_ethertype_filter_remove(filter_info, + (uint8_t)i); + wr32(hw, TXGBE_ETFLT(i), 0); + wr32(hw, TXGBE_ETCLS(i), 0); + txgbe_flush(hw); + } + } +} + static const struct eth_dev_ops txgbe_eth_dev_ops = { .dev_configure = txgbe_dev_configure, .dev_infos_get = txgbe_dev_info_get, diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h index c82f0c832..d89af2150 100644 --- a/drivers/net/txgbe/txgbe_ethdev.h +++ b/drivers/net/txgbe/txgbe_ethdev.h @@ -317,6 +317,10 @@ bool txgbe_rss_update_sp(enum txgbe_mac_type mac_type); int txgbe_add_del_ntuple_filter(struct rte_eth_dev *dev, struct rte_eth_ntuple_filter *filter, bool add); +int txgbe_add_del_ethertype_filter(struct rte_eth_dev *dev, + struct rte_eth_ethertype_filter *filter, + bool add); + void txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction, uint8_t queue, uint8_t msix_vector); @@ -337,6 +341,7 @@ int txgbe_pf_host_configure(struct rte_eth_dev *eth_dev); uint32_t txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val); +void txgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev); void txgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev); int txgbe_vt_check(struct txgbe_hw *hw); @@ -382,6 +387,20 @@ txgbe_ethertype_filter_insert(struct txgbe_filter_info *filter_info, return (i < TXGBE_ETF_ID_MAX ? 
i : -1); } +static inline int +txgbe_ethertype_filter_remove(struct txgbe_filter_info *filter_info, + uint8_t idx) +{ + if (idx >= TXGBE_ETF_ID_MAX) + return -1; + filter_info->ethertype_mask &= ~(1 << idx); + filter_info->ethertype_filters[idx].ethertype = 0; + filter_info->ethertype_filters[idx].etqf = 0; + filter_info->ethertype_filters[idx].etqs = 0; + filter_info->ethertype_filters[idx].etqs = FALSE; + return idx; +} + /* High threshold controlling when to start sending XOFF frames. */ #define TXGBE_FC_XOFF_HITH 128 /*KB*/ /* Low threshold controlling when to start sending XON frames. */ From patchwork Tue Nov 3 10:07:47 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiawen Wu X-Patchwork-Id: 83535 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id B4B79A0521; Tue, 3 Nov 2020 11:12:25 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 51EE5C9C8; Tue, 3 Nov 2020 11:07:26 +0100 (CET) Received: from smtpbgsg3.qq.com (smtpbgsg3.qq.com [54.179.177.220]) by dpdk.org (Postfix) with ESMTP id 6C9D428EE for ; Tue, 3 Nov 2020 11:07:16 +0100 (CET) X-QQ-mid: bizesmtp26t1604398019tnhqiek8 Received: from localhost.localdomain.com (unknown [183.129.236.74]) by esmtp10.qq.com (ESMTP) with id ; Tue, 03 Nov 2020 18:06:59 +0800 (CST) X-QQ-SSF: 01400000002000C0C000B00A0000000 X-QQ-FEAT: 9bYt9YTkLgt2+I9Lpb9pd2TFMme3rm6fvAcwA6FwVz6jBVxq+zjDI2RVFDet4 1ScjzoZzxN7/x3AYVwTSgfiZK9Eu+7alhus1Wv2+RwY38+UOjomRk/f+FYhiSVwQJZndkV0 6ZVLhR2FYseG2TRaB1sRqMKn4EFwvdKnDW06yJHWKO4H6NyQf6kqgEW3Ua4kOdqREsB9GkQ wC2/GQBl4KQheI8OHUlbYhS2G6twMKny4Koj9YbsM6ZI21lbSx17zMPxTISP+mvDIpG1yZi so091l5jznnRDls36H1dNqw3+PlF7ZR5JHOrsZvs48p1y3lr1FYBv51cwDAGps4/rmm+uVm uj9qlM2wZMxYrRR4ZshOtxkzUzSFQ== X-QQ-GoodBg: 2 From: Jiawen Wu To: dev@dpdk.org Cc: Jiawen Wu Date: Tue, 3 Nov 2020 18:07:47 +0800 Message-Id: <20201103100818.311881-7-jiawenwu@trustnetic.com> X-Mailer: git-send-email 2.18.4 In-Reply-To: <20201103100818.311881-1-jiawenwu@trustnetic.com> References: <20201103100818.311881-1-jiawenwu@trustnetic.com> X-QQ-SENDSIZE: 520 Feedback-ID: bizesmtp:trustnetic.com:qybgforeign:qybgforeign5 X-QQ-Bgrelay: 1 Subject: [dpdk-dev] [PATCH 06/37] net/txgbe: add ethertype parse rule X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Add support to parse flow for ethertype filter. Signed-off-by: Jiawen Wu --- drivers/net/txgbe/txgbe_flow.c | 250 +++++++++++++++++++++++++++++++++ 1 file changed, 250 insertions(+) diff --git a/drivers/net/txgbe/txgbe_flow.c b/drivers/net/txgbe/txgbe_flow.c index 6f8be3b7f..fc2505ddc 100644 --- a/drivers/net/txgbe/txgbe_flow.c +++ b/drivers/net/txgbe/txgbe_flow.c @@ -534,3 +534,253 @@ txgbe_parse_ntuple_filter(struct rte_eth_dev *dev, return 0; } +/** + * Parse the rule to see if it is a ethertype rule. + * And get the ethertype filter info BTW. + * pattern: + * The first not void item can be ETH. + * The next not void item must be END. + * action: + * The first not void action should be QUEUE. + * The next not void action should be END. 
+ * pattern example: + * ITEM Spec Mask + * ETH type 0x0807 0xFFFF + * END + * other members in mask and spec should set to 0x00. + * item->last should be NULL. + */ +static int +cons_parse_ethertype_filter(const struct rte_flow_attr *attr, + const struct rte_flow_item *pattern, + const struct rte_flow_action *actions, + struct rte_eth_ethertype_filter *filter, + struct rte_flow_error *error) +{ + const struct rte_flow_item *item; + const struct rte_flow_action *act; + const struct rte_flow_item_eth *eth_spec; + const struct rte_flow_item_eth *eth_mask; + const struct rte_flow_action_queue *act_q; + + if (!pattern) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_NUM, + NULL, "NULL pattern."); + return -rte_errno; + } + + if (!actions) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_NUM, + NULL, "NULL action."); + return -rte_errno; + } + + if (!attr) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR, + NULL, "NULL attribute."); + return -rte_errno; + } + + item = next_no_void_pattern(pattern, NULL); + /* The first non-void item should be MAC. */ + if (item->type != RTE_FLOW_ITEM_TYPE_ETH) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ethertype filter"); + return -rte_errno; + } + + /*Not supported last point for range*/ + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + + /* Get the MAC info. */ + if (!item->spec || !item->mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ethertype filter"); + return -rte_errno; + } + + eth_spec = item->spec; + eth_mask = item->mask; + + /* Mask bits of source MAC address must be full of 0. + * Mask bits of destination MAC address must be full + * of 1 or full of 0. + */ + if (!rte_is_zero_ether_addr(ð_mask->src) || + (!rte_is_zero_ether_addr(ð_mask->dst) && + !rte_is_broadcast_ether_addr(ð_mask->dst))) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Invalid ether address mask"); + return -rte_errno; + } + + if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Invalid ethertype mask"); + return -rte_errno; + } + + /* If mask bits of destination MAC address + * are full of 1, set RTE_ETHTYPE_FLAGS_MAC. + */ + if (rte_is_broadcast_ether_addr(ð_mask->dst)) { + filter->mac_addr = eth_spec->dst; + filter->flags |= RTE_ETHTYPE_FLAGS_MAC; + } else { + filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC; + } + filter->ether_type = rte_be_to_cpu_16(eth_spec->type); + + /* Check if the next non-void item is END. 
*/ + item = next_no_void_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_END) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ethertype filter."); + return -rte_errno; + } + + /* Parse action */ + + act = next_no_void_action(actions, NULL); + if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE && + act->type != RTE_FLOW_ACTION_TYPE_DROP) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } + + if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) { + act_q = (const struct rte_flow_action_queue *)act->conf; + filter->queue = act_q->index; + } else { + filter->flags |= RTE_ETHTYPE_FLAGS_DROP; + } + + /* Check if the next non-void item is END */ + act = next_no_void_action(actions, act); + if (act->type != RTE_FLOW_ACTION_TYPE_END) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } + + /* Parse attr */ + /* Must be input direction */ + if (!attr->ingress) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, + attr, "Only support ingress."); + return -rte_errno; + } + + /* Not supported */ + if (attr->egress) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, + attr, "Not support egress."); + return -rte_errno; + } + + /* Not supported */ + if (attr->transfer) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + attr, "No support for transfer."); + return -rte_errno; + } + + /* Not supported */ + if (attr->priority) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, + attr, "Not support priority."); + return -rte_errno; + } + + /* Not supported */ + if (attr->group) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_GROUP, + attr, "Not support group."); + return -rte_errno; + } + + return 0; +} + +static int +txgbe_parse_ethertype_filter(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_eth_ethertype_filter *filter, + struct rte_flow_error *error) +{ + int ret; + + ret = cons_parse_ethertype_filter(attr, pattern, + actions, filter, error); + + if (ret) + return ret; + + /* txgbe doesn't support MAC address. 
*/ + if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) { + memset(filter, 0, sizeof(struct rte_eth_ethertype_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "Not supported by ethertype filter"); + return -rte_errno; + } + + if (filter->queue >= dev->data->nb_rx_queues) { + memset(filter, 0, sizeof(struct rte_eth_ethertype_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "queue index much too big"); + return -rte_errno; + } + + if (filter->ether_type == RTE_ETHER_TYPE_IPV4 || + filter->ether_type == RTE_ETHER_TYPE_IPV6) { + memset(filter, 0, sizeof(struct rte_eth_ethertype_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "IPv4/IPv6 not supported by ethertype filter"); + return -rte_errno; + } + + if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) { + memset(filter, 0, sizeof(struct rte_eth_ethertype_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "mac compare is unsupported"); + return -rte_errno; + } + + if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) { + memset(filter, 0, sizeof(struct rte_eth_ethertype_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "drop option is unsupported"); + return -rte_errno; + } + + return 0; +} + From patchwork Tue Nov 3 10:07:48 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiawen Wu X-Patchwork-Id: 83527 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 5A4B1A0521; Tue, 3 Nov 2020 11:09:37 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id A8234C922; Tue, 3 Nov 2020 11:07:12 +0100 (CET) Received: from smtpbgsg2.qq.com (smtpbgsg2.qq.com [54.254.200.128]) by dpdk.org (Postfix) with ESMTP id 65D97C8DC for ; Tue, 3 Nov 2020 11:07:06 +0100 (CET) X-QQ-mid: bizesmtp26t1604398020txn7l51n Received: from localhost.localdomain.com (unknown [183.129.236.74]) by esmtp10.qq.com (ESMTP) with id ; Tue, 03 Nov 2020 18:07:00 +0800 (CST) X-QQ-SSF: 01400000002000C0C000B00A0000000 X-QQ-FEAT: lRUSrEWtKQCHN/J+HiTxB9kAVfU1foHcEMQC9rlL7awG8C0EbJxMmx+wLTV6S uH1ttcerAZE1oN4QKX3eYeReFJJfaSZdQlFGLPrzH7jrvpPp4bTBSflllomv8MjENXUWfTx IJCt9v8oOjg1oWqanN9Vc7Y++wiZ/uQVe/qJXLG8kpP51WVk6w4/6I3FRWMGbBVwuuH1xBo lnYuqu8llLSmPdw0f3CL98ohx0N4ivktYBlJK6qXAU4eIGnkJoannOdO8kniYMnev8padSQ sx7KQo+yXuLi/n2BBzo+cDbLhSLTbhGP13gwtPhlBqwlt8LMpYDUwclHGbYINjorzZWnboh i/PdkLObDR6unJDRFA= X-QQ-GoodBg: 2 From: Jiawen Wu To: dev@dpdk.org Cc: Jiawen Wu Date: Tue, 3 Nov 2020 18:07:48 +0800 Message-Id: <20201103100818.311881-8-jiawenwu@trustnetic.com> X-Mailer: git-send-email 2.18.4 In-Reply-To: <20201103100818.311881-1-jiawenwu@trustnetic.com> References: <20201103100818.311881-1-jiawenwu@trustnetic.com> X-QQ-SENDSIZE: 520 Feedback-ID: bizesmtp:trustnetic.com:qybgforeign:qybgforeign7 X-QQ-Bgrelay: 1 Subject: [dpdk-dev] [PATCH 07/37] net/txgbe: support syn filter add and delete X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Support add and delete operations on syn filter. 
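A brief usage sketch (hypothetical, not part of the patch) of the new txgbe_syn_filter_set(): struct rte_eth_syn_filter carries only the target queue and a high-priority flag, and the driver allows a single active SYN filter at a time.

#include <stdbool.h>
#include <rte_ethdev.h>
#include "txgbe_ethdev.h"

/* hypothetical helper: send all received TCP SYN packets to rx_queue */
static int
txgbe_example_enable_syn_filter(struct rte_eth_dev *dev, uint16_t rx_queue)
{
	struct rte_eth_syn_filter syn = {
		.hig_pri = 1,      /* SYN match takes precedence over other filters */
		.queue = rx_queue, /* must be below TXGBE_MAX_RX_QUEUE_NUM */
	};

	/* add == true programs TXGBE_SYNCLS; add == false would clear it */
	return txgbe_syn_filter_set(dev, &syn, true);
}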
Signed-off-by: Jiawen Wu --- drivers/net/txgbe/txgbe_ethdev.c | 70 ++++++++++++++++++++++++++++++++ drivers/net/txgbe/txgbe_ethdev.h | 6 +++ 2 files changed, 76 insertions(+) diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c index b479c9152..e2599e429 100644 --- a/drivers/net/txgbe/txgbe_ethdev.c +++ b/drivers/net/txgbe/txgbe_ethdev.c @@ -3696,6 +3696,44 @@ txgbe_set_queue_rate_limit(struct rte_eth_dev *dev, return 0; } +int +txgbe_syn_filter_set(struct rte_eth_dev *dev, + struct rte_eth_syn_filter *filter, + bool add) +{ + struct txgbe_hw *hw = TXGBE_DEV_HW(dev); + struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev); + uint32_t syn_info; + uint32_t synqf; + + if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM) + return -EINVAL; + + syn_info = filter_info->syn_info; + + if (add) { + if (syn_info & TXGBE_SYNCLS_ENA) + return -EINVAL; + synqf = (uint32_t)TXGBE_SYNCLS_QPID(filter->queue); + synqf |= TXGBE_SYNCLS_ENA; + + if (filter->hig_pri) + synqf |= TXGBE_SYNCLS_HIPRIO; + else + synqf &= ~TXGBE_SYNCLS_HIPRIO; + } else { + synqf = rd32(hw, TXGBE_SYNCLS); + if (!(syn_info & TXGBE_SYNCLS_ENA)) + return -ENOENT; + synqf &= ~(TXGBE_SYNCLS_QPID_MASK | TXGBE_SYNCLS_ENA); + } + + filter_info->syn_info = synqf; + wr32(hw, TXGBE_SYNCLS, synqf); + txgbe_flush(hw); + return 0; +} + static inline enum txgbe_5tuple_protocol convert_protocol_type(uint8_t protocol_value) { @@ -4606,11 +4644,28 @@ txgbe_ethertype_filter_restore(struct rte_eth_dev *dev) } } +/* restore SYN filter */ +static inline void +txgbe_syn_filter_restore(struct rte_eth_dev *dev) +{ + struct txgbe_hw *hw = TXGBE_DEV_HW(dev); + struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev); + uint32_t synqf; + + synqf = filter_info->syn_info; + + if (synqf & TXGBE_SYNCLS_ENA) { + wr32(hw, TXGBE_SYNCLS, synqf); + txgbe_flush(hw); + } +} + static int txgbe_filter_restore(struct rte_eth_dev *dev) { txgbe_ntuple_filter_restore(dev); txgbe_ethertype_filter_restore(dev); + txgbe_syn_filter_restore(dev); return 0; } @@ -4646,6 +4701,21 @@ txgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev) } } +/* remove the SYN filter */ +void +txgbe_clear_syn_filter(struct rte_eth_dev *dev) +{ + struct txgbe_hw *hw = TXGBE_DEV_HW(dev); + struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev); + + if (filter_info->syn_info & TXGBE_SYNCLS_ENA) { + filter_info->syn_info = 0; + + wr32(hw, TXGBE_SYNCLS, 0); + txgbe_flush(hw); + } +} + static const struct eth_dev_ops txgbe_eth_dev_ops = { .dev_configure = txgbe_dev_configure, .dev_infos_get = txgbe_dev_info_get, diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h index d89af2150..bb53dd74a 100644 --- a/drivers/net/txgbe/txgbe_ethdev.h +++ b/drivers/net/txgbe/txgbe_ethdev.h @@ -164,6 +164,8 @@ struct txgbe_filter_info { /* Bit mask for every used 5tuple filter */ uint32_t fivetuple_mask[TXGBE_5TUPLE_ARRAY_SIZE]; struct txgbe_5tuple_filter_list fivetuple_list; + /* store the SYN filter info */ + uint32_t syn_info; }; /* The configuration of bandwidth */ @@ -320,6 +322,9 @@ int txgbe_add_del_ntuple_filter(struct rte_eth_dev *dev, int txgbe_add_del_ethertype_filter(struct rte_eth_dev *dev, struct rte_eth_ethertype_filter *filter, bool add); +int txgbe_syn_filter_set(struct rte_eth_dev *dev, + struct rte_eth_syn_filter *filter, + bool add); void txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction, uint8_t queue, uint8_t msix_vector); @@ -343,6 +348,7 @@ uint32_t txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val); void 
txgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev); void txgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev); +void txgbe_clear_syn_filter(struct rte_eth_dev *dev); int txgbe_vt_check(struct txgbe_hw *hw); int txgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf, From patchwork Tue Nov 3 10:07:49 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiawen Wu X-Patchwork-Id: 83526 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 0231FA0521; Tue, 3 Nov 2020 11:09:18 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id AB384C90C; Tue, 3 Nov 2020 11:07:10 +0100 (CET) Received: from smtpbgeu1.qq.com (smtpbgeu1.qq.com [52.59.177.22]) by dpdk.org (Postfix) with ESMTP id 5A167C8F2 for ; Tue, 3 Nov 2020 11:07:07 +0100 (CET) X-QQ-mid: bizesmtp26t1604398022tklya7bb Received: from localhost.localdomain.com (unknown [183.129.236.74]) by esmtp10.qq.com (ESMTP) with id ; Tue, 03 Nov 2020 18:07:02 +0800 (CST) X-QQ-SSF: 01400000002000C0C000B00A0000000 X-QQ-FEAT: oLU221C22Z84Izvxb4X99GoIpMwhSdLkPEG7wBq/IqcXqGzVlCvSTMo9hM6T2 CHR6xHfIFeZZF7K3Rx+onIbgB49kqXHmISZLylHdhOelmPIkXy1NnL7QcxIBW9QAAWV2mmV A0sVFheusaL/qgqw/u00LIeRE4Ma4BRXROeh+jjPnvjd1CktkviGGR99JtoLQ6AQHeeNCTB As/nK39TLbEOJF/vb2E11HB5o4Zgb0Fcfc3QvircbjuXt3CYibInjYgI1584hpvJtmHHdW8 QLWQveNeFT9XzHWtmyp88z+34aVw9TYDEAJPExBuCBkNrbMNXQlTJyrWkRPR54QO6R7tniY 2uPneWJtGKNYLacfv0= X-QQ-GoodBg: 2 From: Jiawen Wu To: dev@dpdk.org Cc: Jiawen Wu Date: Tue, 3 Nov 2020 18:07:49 +0800 Message-Id: <20201103100818.311881-9-jiawenwu@trustnetic.com> X-Mailer: git-send-email 2.18.4 In-Reply-To: <20201103100818.311881-1-jiawenwu@trustnetic.com> References: <20201103100818.311881-1-jiawenwu@trustnetic.com> X-QQ-SENDSIZE: 520 Feedback-ID: bizesmtp:trustnetic.com:qybgforeign:qybgforeign5 X-QQ-Bgrelay: 1 Subject: [dpdk-dev] [PATCH 08/37] net/txgbe: add syn filter parse rule X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Add support to parse flow for syn filter. Signed-off-by: Jiawen Wu --- drivers/net/txgbe/txgbe_flow.c | 256 +++++++++++++++++++++++++++++++++ 1 file changed, 256 insertions(+) diff --git a/drivers/net/txgbe/txgbe_flow.c b/drivers/net/txgbe/txgbe_flow.c index fc2505ddc..7110e594b 100644 --- a/drivers/net/txgbe/txgbe_flow.c +++ b/drivers/net/txgbe/txgbe_flow.c @@ -784,3 +784,259 @@ txgbe_parse_ethertype_filter(struct rte_eth_dev *dev, return 0; } +/** + * Parse the rule to see if it is a TCP SYN rule. + * And get the TCP SYN filter info BTW. + * pattern: + * The first not void item must be ETH. + * The second not void item must be IPV4 or IPV6. + * The third not void item must be TCP. + * The next not void item must be END. + * action: + * The first not void action should be QUEUE. + * The next not void action should be END. + * pattern example: + * ITEM Spec Mask + * ETH NULL NULL + * IPV4/IPV6 NULL NULL + * TCP tcp_flags 0x02 0xFF + * END + * other members in mask and spec should set to 0x00. + * item->last should be NULL. 
+ */ +static int +cons_parse_syn_filter(const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_eth_syn_filter *filter, + struct rte_flow_error *error) +{ + const struct rte_flow_item *item; + const struct rte_flow_action *act; + const struct rte_flow_item_tcp *tcp_spec; + const struct rte_flow_item_tcp *tcp_mask; + const struct rte_flow_action_queue *act_q; + + if (!pattern) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_NUM, + NULL, "NULL pattern."); + return -rte_errno; + } + + if (!actions) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_NUM, + NULL, "NULL action."); + return -rte_errno; + } + + if (!attr) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR, + NULL, "NULL attribute."); + return -rte_errno; + } + + + /* the first not void item should be MAC or IPv4 or IPv6 or TCP */ + item = next_no_void_pattern(pattern, NULL); + if (item->type != RTE_FLOW_ITEM_TYPE_ETH && + item->type != RTE_FLOW_ITEM_TYPE_IPV4 && + item->type != RTE_FLOW_ITEM_TYPE_IPV6 && + item->type != RTE_FLOW_ITEM_TYPE_TCP) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by syn filter"); + return -rte_errno; + } + /*Not supported last point for range*/ + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + + /* Skip Ethernet */ + if (item->type == RTE_FLOW_ITEM_TYPE_ETH) { + /* if the item is MAC, the content should be NULL */ + if (item->spec || item->mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Invalid SYN address mask"); + return -rte_errno; + } + + /* check if the next not void item is IPv4 or IPv6 */ + item = next_no_void_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 && + item->type != RTE_FLOW_ITEM_TYPE_IPV6) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by syn filter"); + return -rte_errno; + } + } + + /* Skip IP */ + if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 || + item->type == RTE_FLOW_ITEM_TYPE_IPV6) { + /* if the item is IP, the content should be NULL */ + if (item->spec || item->mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Invalid SYN mask"); + return -rte_errno; + } + + /* check if the next not void item is TCP */ + item = next_no_void_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_TCP) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by syn filter"); + return -rte_errno; + } + } + + /* Get the TCP info. Only support SYN. 
*/ + if (!item->spec || !item->mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Invalid SYN mask"); + return -rte_errno; + } + /*Not supported last point for range*/ + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + + tcp_spec = item->spec; + tcp_mask = item->mask; + if (!(tcp_spec->hdr.tcp_flags & RTE_TCP_SYN_FLAG) || + tcp_mask->hdr.src_port || + tcp_mask->hdr.dst_port || + tcp_mask->hdr.sent_seq || + tcp_mask->hdr.recv_ack || + tcp_mask->hdr.data_off || + tcp_mask->hdr.tcp_flags != RTE_TCP_SYN_FLAG || + tcp_mask->hdr.rx_win || + tcp_mask->hdr.cksum || + tcp_mask->hdr.tcp_urp) { + memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by syn filter"); + return -rte_errno; + } + + /* check if the next not void item is END */ + item = next_no_void_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_END) { + memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by syn filter"); + return -rte_errno; + } + + /* check if the first not void action is QUEUE. */ + act = next_no_void_action(actions, NULL); + if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) { + memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } + + act_q = (const struct rte_flow_action_queue *)act->conf; + filter->queue = act_q->index; + if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM) { + memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } + + /* check if the next not void item is END */ + act = next_no_void_action(actions, act); + if (act->type != RTE_FLOW_ACTION_TYPE_END) { + memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } + + /* parse attr */ + /* must be input direction */ + if (!attr->ingress) { + memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, + attr, "Only support ingress."); + return -rte_errno; + } + + /* not supported */ + if (attr->egress) { + memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, + attr, "Not support egress."); + return -rte_errno; + } + + /* not supported */ + if (attr->transfer) { + memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + attr, "No support for transfer."); + return -rte_errno; + } + + /* Support 2 priorities, the lowest or highest. 
*/ + if (!attr->priority) { + filter->hig_pri = 0; + } else if (attr->priority == (uint32_t)~0U) { + filter->hig_pri = 1; + } else { + memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, + attr, "Not support priority."); + return -rte_errno; + } + + return 0; +} + +static int +txgbe_parse_syn_filter(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_eth_syn_filter *filter, + struct rte_flow_error *error) +{ + int ret; + + ret = cons_parse_syn_filter(attr, pattern, + actions, filter, error); + + if (filter->queue >= dev->data->nb_rx_queues) + return -rte_errno; + + if (ret) + return ret; + + return 0; +} + From patchwork Tue Nov 3 10:07:50 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiawen Wu X-Patchwork-Id: 83536 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 46CD5A0521; Tue, 3 Nov 2020 11:12:50 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id D6194C9D4; Tue, 3 Nov 2020 11:07:27 +0100 (CET) Received: from smtpbguseast2.qq.com (smtpbguseast2.qq.com [54.204.34.130]) by dpdk.org (Postfix) with ESMTP id 527841D9E for ; Tue, 3 Nov 2020 11:07:16 +0100 (CET) X-QQ-mid: bizesmtp26t1604398023tl4b9m0j Received: from localhost.localdomain.com (unknown [183.129.236.74]) by esmtp10.qq.com (ESMTP) with id ; Tue, 03 Nov 2020 18:07:03 +0800 (CST) X-QQ-SSF: 01400000002000C0C000B00A0000000 X-QQ-FEAT: vXHWdJQTAMdG4it17SLTlnJ3QuZpgVUdnaDEt127BTEMkg4x1/Xq2Ajnjba++ gxls5hfIU79FwtUQRowU24aK5zSLV7Lye1PC8783Al/Bed9zSDH1FuCqI/qQ4c9yAwpOH83 ZBbRpx25tyoeEP+4CfMSSsJTsivsqoniXzgOTi01SUUVM0VDG6gesPR/pRzxGjrpq1fPzl5 14Go6V01BgMRhInQBo6EEbDu9Hq7vIO4GTOvqniKrZoURKFwIBsOSG9UOjbSexy001sCdzQ CuenxxFU1auo6mrBjGhfU1sQpmcAFmwqBKnDQ/u0jCjsI6qQoCZXwkk9+HVcd9FRsRRAQVm 6v/BA//b3zU72b6arws00oR9Crq4g== X-QQ-GoodBg: 2 From: Jiawen Wu To: dev@dpdk.org Cc: Jiawen Wu Date: Tue, 3 Nov 2020 18:07:50 +0800 Message-Id: <20201103100818.311881-10-jiawenwu@trustnetic.com> X-Mailer: git-send-email 2.18.4 In-Reply-To: <20201103100818.311881-1-jiawenwu@trustnetic.com> References: <20201103100818.311881-1-jiawenwu@trustnetic.com> X-QQ-SENDSIZE: 520 Feedback-ID: bizesmtp:trustnetic.com:qybgforeign:qybgforeign6 X-QQ-Bgrelay: 1 Subject: [dpdk-dev] [PATCH 09/37] net/txgbe: add L2 tunnel filter init and uninit X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Add L2 tunnel filter init and uninit. 
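The bookkeeping follows the pattern used again for the flow director filters later in this series: a TAILQ of filter nodes that can be walked to reprogram the hardware after a restart, plus an rte_hash keyed by tunnel type and tunnel id whose returned slot indexes an array of node pointers for O(1) lookup. Below is a condensed, self-contained sketch of that pattern; every demo_* name is illustrative only, while the rte_hash/TAILQ calls are the APIs the patch actually uses:

#include <errno.h>
#include <stdint.h>
#include <sys/queue.h>
#include <rte_hash.h>
#include <rte_hash_crc.h>
#include <rte_lcore.h>
#include <rte_malloc.h>

struct demo_key { uint32_t tunnel_id; };

struct demo_node {
        TAILQ_ENTRY(demo_node) entries;
        struct demo_key key;
        uint32_t pool;
};

TAILQ_HEAD(demo_list, demo_node);

struct demo_table {
        struct demo_list list;    /* walked when restoring filters */
        struct demo_node **map;   /* indexed by hash position */
        struct rte_hash *hash;    /* maps key to position */
};

static int
demo_table_init(struct demo_table *t, uint32_t entries)
{
        struct rte_hash_parameters params = {
                .name = "demo_l2_tn",
                .entries = entries,
                .key_len = sizeof(struct demo_key),
                .hash_func = rte_hash_crc,
                .socket_id = rte_socket_id(),
        };

        TAILQ_INIT(&t->list);
        t->hash = rte_hash_create(&params);
        if (t->hash == NULL)
                return -EINVAL;
        t->map = rte_zmalloc("demo_map", sizeof(*t->map) * entries, 0);
        return t->map != NULL ? 0 : -ENOMEM;
}

static int
demo_table_insert(struct demo_table *t, struct demo_node *node)
{
        int pos = rte_hash_add_key(t->hash, &node->key);

        if (pos < 0)            /* hash table full or invalid parameters */
                return pos;
        t->map[pos] = node;     /* the returned position doubles as array index */
        TAILQ_INSERT_TAIL(&t->list, node, entries);
        return 0;
}

Deleting an entry mirrors the insert: rte_hash_del_key() returns the same position, which is used to clear the map slot before the node is unlinked and freed, as txgbe_remove_l2_tn_filter() does in patch 11/37.
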
Signed-off-by: Jiawen Wu --- drivers/net/txgbe/txgbe_ethdev.c | 65 ++++++++++++++++++++++++++++++++ drivers/net/txgbe/txgbe_ethdev.h | 32 ++++++++++++++++ 2 files changed, 97 insertions(+) diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c index e2599e429..79f1f9535 100644 --- a/drivers/net/txgbe/txgbe_ethdev.c +++ b/drivers/net/txgbe/txgbe_ethdev.c @@ -88,6 +88,8 @@ static const struct reg_info *txgbe_regs_others[] = { txgbe_regs_diagnostic, NULL}; +static int txgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev); +static int txgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev); static int txgbe_dev_set_link_up(struct rte_eth_dev *dev); static int txgbe_dev_set_link_down(struct rte_eth_dev *dev); static int txgbe_dev_close(struct rte_eth_dev *dev); @@ -687,6 +689,9 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) /* initialize 5tuple filter list */ TAILQ_INIT(&filter_info->fivetuple_list); + /* initialize l2 tunnel filter list & hash */ + txgbe_l2_tn_filter_init(eth_dev); + /* initialize bandwidth configuration info */ memset(bw_conf, 0, sizeof(struct txgbe_bw_conf)); @@ -723,6 +728,63 @@ static int txgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev) return 0; } +static int txgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev) +{ + struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(eth_dev); + struct txgbe_l2_tn_filter *l2_tn_filter; + + if (l2_tn_info->hash_map) + rte_free(l2_tn_info->hash_map); + if (l2_tn_info->hash_handle) + rte_hash_free(l2_tn_info->hash_handle); + + while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) { + TAILQ_REMOVE(&l2_tn_info->l2_tn_list, + l2_tn_filter, + entries); + rte_free(l2_tn_filter); + } + + return 0; +} + +static int txgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev) +{ + struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(eth_dev); + char l2_tn_hash_name[RTE_HASH_NAMESIZE]; + struct rte_hash_parameters l2_tn_hash_params = { + .name = l2_tn_hash_name, + .entries = TXGBE_MAX_L2_TN_FILTER_NUM, + .key_len = sizeof(struct txgbe_l2_tn_key), + .hash_func = rte_hash_crc, + .hash_func_init_val = 0, + .socket_id = rte_socket_id(), + }; + + TAILQ_INIT(&l2_tn_info->l2_tn_list); + snprintf(l2_tn_hash_name, RTE_HASH_NAMESIZE, + "l2_tn_%s", TDEV_NAME(eth_dev)); + l2_tn_info->hash_handle = rte_hash_create(&l2_tn_hash_params); + if (!l2_tn_info->hash_handle) { + PMD_INIT_LOG(ERR, "Failed to create L2 TN hash table!"); + return -EINVAL; + } + l2_tn_info->hash_map = rte_zmalloc("txgbe", + sizeof(struct txgbe_l2_tn_filter *) * + TXGBE_MAX_L2_TN_FILTER_NUM, + 0); + if (!l2_tn_info->hash_map) { + PMD_INIT_LOG(ERR, + "Failed to allocate memory for L2 TN hash map!"); + return -ENOMEM; + } + l2_tn_info->e_tag_en = FALSE; + l2_tn_info->e_tag_fwd_en = FALSE; + l2_tn_info->e_tag_ether_type = RTE_ETHER_TYPE_ETAG; + + return 0; +} + static int eth_txgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, struct rte_pci_device *pci_dev) @@ -1802,6 +1864,9 @@ txgbe_dev_close(struct rte_eth_dev *dev) rte_free(dev->data->hash_mac_addrs); dev->data->hash_mac_addrs = NULL; + /* remove all the L2 tunnel filters & hash */ + txgbe_l2_tn_filter_uninit(dev); + /* Remove all ntuple filters of the device */ txgbe_ntuple_filter_uninit(dev); diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h index bb53dd74a..bd4ddab5a 100644 --- a/drivers/net/txgbe/txgbe_ethdev.h +++ b/drivers/net/txgbe/txgbe_ethdev.h @@ -9,7 +9,10 @@ #include "base/txgbe.h" #include "txgbe_ptypes.h" +#include 
#include +#include +#include /* need update link, bit flag */ #define TXGBE_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0) @@ -56,6 +59,8 @@ #define TXGBE_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET #define TXGBE_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET +#define TXGBE_MAX_L2_TN_FILTER_NUM 128 + /* structure for interrupt relative data */ struct txgbe_interrupt { uint32_t flags; @@ -168,6 +173,28 @@ struct txgbe_filter_info { uint32_t syn_info; }; +struct txgbe_l2_tn_key { + enum rte_eth_tunnel_type l2_tn_type; + uint32_t tn_id; +}; + +struct txgbe_l2_tn_filter { + TAILQ_ENTRY(txgbe_l2_tn_filter) entries; + struct txgbe_l2_tn_key key; + uint32_t pool; +}; + +TAILQ_HEAD(txgbe_l2_tn_filter_list, txgbe_l2_tn_filter); + +struct txgbe_l2_tn_info { + struct txgbe_l2_tn_filter_list l2_tn_list; + struct txgbe_l2_tn_filter **hash_map; + struct rte_hash *hash_handle; + bool e_tag_en; /* e-tag enabled */ + bool e_tag_fwd_en; /* e-tag based forwarding enabled */ + uint16_t e_tag_ether_type; /* ether type for e-tag */ +}; + /* The configuration of bandwidth */ struct txgbe_bw_conf { uint8_t tc_num; /* Number of TCs. */ @@ -188,6 +215,7 @@ struct txgbe_adapter { struct txgbe_vf_info *vfdata; struct txgbe_uta_info uta_info; struct txgbe_filter_info filter; + struct txgbe_l2_tn_info l2_tn; struct txgbe_bw_conf bw_conf; bool rx_bulk_alloc_allowed; struct rte_timecounter systime_tc; @@ -233,6 +261,10 @@ struct txgbe_adapter { #define TXGBE_DEV_FILTER(dev) \ (&((struct txgbe_adapter *)(dev)->data->dev_private)->filter) + +#define TXGBE_DEV_L2_TN(dev) \ + (&((struct txgbe_adapter *)(dev)->data->dev_private)->l2_tn) + #define TXGBE_DEV_BW_CONF(dev) \ (&((struct txgbe_adapter *)(dev)->data->dev_private)->bw_conf) From patchwork Tue Nov 3 10:07:51 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiawen Wu X-Patchwork-Id: 83528 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 50891A0521; Tue, 3 Nov 2020 11:09:57 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 40E95C92A; Tue, 3 Nov 2020 11:07:14 +0100 (CET) Received: from smtpbgbr2.qq.com (smtpbgbr2.qq.com [54.207.22.56]) by dpdk.org (Postfix) with ESMTP id BAC1CC918 for ; Tue, 3 Nov 2020 11:07:10 +0100 (CET) X-QQ-mid: bizesmtp26t1604398024teljwj2z Received: from localhost.localdomain.com (unknown [183.129.236.74]) by esmtp10.qq.com (ESMTP) with id ; Tue, 03 Nov 2020 18:07:04 +0800 (CST) X-QQ-SSF: 01400000002000C0C000B00A0000000 X-QQ-FEAT: Znu4Ocv3ljbQWRM0O9yxKJXn29GM5ZoCtXRH8o/Bue0K8gFJPwVzKQX/qR+yu cz8IU7Ph2zKxUmDPFAvlT4CMQzfOfzGKpzFzHgRLQ+UmTrS110Gr6NJTwv/urboJLGac2PC qj3qIY42gBDrvmAGO3cKWdnVA/YDhotUSM1y9MViehaYFsblSXH/jtrmrmKwJEJ54trkzq+ d0Y9W9DOQGvhUgY9QXahFgU7Ux2YuuSgIwAtZMvPi+KywpvK3U3Gxvd6PZ/L3O3m8qrPbou X4y96orhW5kVVe4RALnN9oUTvvS4N9xfLTHE3IswaQcH4ETtoh/B/wYWJOI/4NjcNMFf/eJ QbJqtCkGNT+2foUvgw= X-QQ-GoodBg: 2 From: Jiawen Wu To: dev@dpdk.org Cc: Jiawen Wu Date: Tue, 3 Nov 2020 18:07:51 +0800 Message-Id: <20201103100818.311881-11-jiawenwu@trustnetic.com> X-Mailer: git-send-email 2.18.4 In-Reply-To: <20201103100818.311881-1-jiawenwu@trustnetic.com> References: <20201103100818.311881-1-jiawenwu@trustnetic.com> X-QQ-SENDSIZE: 520 Feedback-ID: bizesmtp:trustnetic.com:qybgforeign:qybgforeign5 X-QQ-Bgrelay: 1 Subject: [dpdk-dev] [PATCH 10/37] net/txgbe: config L2 tunnel filter 
with e-tag X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Config L2 tunnel filter with e-tag. Signed-off-by: Jiawen Wu --- drivers/net/txgbe/txgbe_ethdev.c | 63 ++++++++++++++++++++++++++++++++ 1 file changed, 63 insertions(+) diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c index 79f1f9535..0be894b7b 100644 --- a/drivers/net/txgbe/txgbe_ethdev.c +++ b/drivers/net/txgbe/txgbe_ethdev.c @@ -112,6 +112,7 @@ static void txgbe_dev_interrupt_delayed_handler(void *param); static void txgbe_configure_msix(struct rte_eth_dev *dev); static int txgbe_filter_restore(struct rte_eth_dev *dev); +static void txgbe_l2_tunnel_conf(struct rte_eth_dev *dev); #define TXGBE_SET_HWSTRIP(h, q) do {\ uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \ @@ -1675,6 +1676,7 @@ txgbe_dev_start(struct rte_eth_dev *dev) /* resume enabled intr since hw reset */ txgbe_enable_intr(dev); + txgbe_l2_tunnel_conf(dev); txgbe_filter_restore(dev); /* @@ -4678,6 +4680,52 @@ txgbe_dev_get_dcb_info(struct rte_eth_dev *dev, return 0; } +/* Update e-tag ether type */ +static int +txgbe_update_e_tag_eth_type(struct txgbe_hw *hw, + uint16_t ether_type) +{ + uint32_t etag_etype; + + etag_etype = rd32(hw, TXGBE_EXTAG); + etag_etype &= ~TXGBE_EXTAG_ETAG_MASK; + etag_etype |= ether_type; + wr32(hw, TXGBE_EXTAG, etag_etype); + txgbe_flush(hw); + + return 0; +} + +/* Enable e-tag tunnel */ +static int +txgbe_e_tag_enable(struct txgbe_hw *hw) +{ + uint32_t etag_etype; + + etag_etype = rd32(hw, TXGBE_PORTCTL); + etag_etype |= TXGBE_PORTCTL_ETAG; + wr32(hw, TXGBE_PORTCTL, etag_etype); + txgbe_flush(hw); + + return 0; +} + +static int +txgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en) +{ + int ret = 0; + uint32_t ctrl; + struct txgbe_hw *hw = TXGBE_DEV_HW(dev); + + ctrl = rd32(hw, TXGBE_POOLCTL); + ctrl &= ~TXGBE_POOLCTL_MODE_MASK; + if (en) + ctrl |= TXGBE_PSRPOOL_MODE_ETAG; + wr32(hw, TXGBE_POOLCTL, ctrl); + + return ret; +} + /* restore n-tuple filter */ static inline void txgbe_ntuple_filter_restore(struct rte_eth_dev *dev) @@ -4735,6 +4783,21 @@ txgbe_filter_restore(struct rte_eth_dev *dev) return 0; } +static void +txgbe_l2_tunnel_conf(struct rte_eth_dev *dev) +{ + struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev); + struct txgbe_hw *hw = TXGBE_DEV_HW(dev); + + if (l2_tn_info->e_tag_en) + (void)txgbe_e_tag_enable(hw); + + if (l2_tn_info->e_tag_fwd_en) + (void)txgbe_e_tag_forwarding_en_dis(dev, 1); + + (void)txgbe_update_e_tag_eth_type(hw, l2_tn_info->e_tag_ether_type); +} + /* remove all the n-tuple filters */ void txgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev) From patchwork Tue Nov 3 10:07:52 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiawen Wu X-Patchwork-Id: 83530 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 4492DA0521; Tue, 3 Nov 2020 11:10:33 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id A8E465AA7; Tue, 3 Nov 2020 11:07:17 +0100 (CET) Received: from smtpbgau1.qq.com (smtpbgau1.qq.com [54.206.16.166]) by dpdk.org (Postfix) with ESMTP id 94FB4C90E for ; Tue, 3 Nov 2020 11:07:11 +0100 
(CET) X-QQ-mid: bizesmtp26t1604398026te634z2y Received: from localhost.localdomain.com (unknown [183.129.236.74]) by esmtp10.qq.com (ESMTP) with id ; Tue, 03 Nov 2020 18:07:05 +0800 (CST) X-QQ-SSF: 01400000002000C0C000B00A0000000 X-QQ-FEAT: mAJfWfDYrJPIXOPR3XkPHfV/3jCqwT2dPcqzLo2VUzHYXrEwCMJUT4quayRYW IvfuQww8Ec2sywNyK8NxjbbnJoXAOXbIWsrjFlRpSUzg+bqL+xm+zGz02PbekRksK+jGRhY ow9CS+r4jypHuKhEOtqupsFtiMcWLSnh/Af9ik/T5vbn39Dylsq8oHjQ4/ppn7zLUMpdLdE zrm9q18cHUtKPoX1EBtEuBFa3FfepZLwb0mF1+Vuxol7nMRMoLF8O7Fm8qTWAgsAQwqSmDj IcuCeLwhWCenbBiMMOusCciEw7op0fwem4f4f/jwYdyVTg1krDVKAqublXlpZ3EsGaoMBfd C40u5nHNuNY5BEqEBM= X-QQ-GoodBg: 2 From: Jiawen Wu To: dev@dpdk.org Cc: Jiawen Wu Date: Tue, 3 Nov 2020 18:07:52 +0800 Message-Id: <20201103100818.311881-12-jiawenwu@trustnetic.com> X-Mailer: git-send-email 2.18.4 In-Reply-To: <20201103100818.311881-1-jiawenwu@trustnetic.com> References: <20201103100818.311881-1-jiawenwu@trustnetic.com> X-QQ-SENDSIZE: 520 Feedback-ID: bizesmtp:trustnetic.com:qybgforeign:qybgforeign7 X-QQ-Bgrelay: 1 Subject: [dpdk-dev] [PATCH 11/37] net/txgbe: support L2 tunnel filter add and delete X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Support L2 tunnel filter add and delete operations. Signed-off-by: Jiawen Wu --- drivers/net/txgbe/txgbe_ethdev.c | 230 +++++++++++++++++++++++++++++++ drivers/net/txgbe/txgbe_ethdev.h | 18 +++ 2 files changed, 248 insertions(+) diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c index 0be894b7b..94fa03182 100644 --- a/drivers/net/txgbe/txgbe_ethdev.c +++ b/drivers/net/txgbe/txgbe_ethdev.c @@ -4710,6 +4710,219 @@ txgbe_e_tag_enable(struct txgbe_hw *hw) return 0; } +static int +txgbe_e_tag_filter_del(struct rte_eth_dev *dev, + struct txgbe_l2_tunnel_conf *l2_tunnel) +{ + int ret = 0; + struct txgbe_hw *hw = TXGBE_DEV_HW(dev); + uint32_t i, rar_entries; + uint32_t rar_low, rar_high; + + rar_entries = hw->mac.num_rar_entries; + + for (i = 1; i < rar_entries; i++) { + wr32(hw, TXGBE_ETHADDRIDX, i); + rar_high = rd32(hw, TXGBE_ETHADDRH); + rar_low = rd32(hw, TXGBE_ETHADDRL); + if ((rar_high & TXGBE_ETHADDRH_VLD) && + (rar_high & TXGBE_ETHADDRH_ETAG) && + (TXGBE_ETHADDRL_ETAG(rar_low) == + l2_tunnel->tunnel_id)) { + wr32(hw, TXGBE_ETHADDRL, 0); + wr32(hw, TXGBE_ETHADDRH, 0); + + txgbe_clear_vmdq(hw, i, BIT_MASK32); + + return ret; + } + } + + return ret; +} + +static int +txgbe_e_tag_filter_add(struct rte_eth_dev *dev, + struct txgbe_l2_tunnel_conf *l2_tunnel) +{ + int ret = 0; + struct txgbe_hw *hw = TXGBE_DEV_HW(dev); + uint32_t i, rar_entries; + uint32_t rar_low, rar_high; + + /* One entry for one tunnel. Try to remove potential existing entry. */ + txgbe_e_tag_filter_del(dev, l2_tunnel); + + rar_entries = hw->mac.num_rar_entries; + + for (i = 1; i < rar_entries; i++) { + wr32(hw, TXGBE_ETHADDRIDX, i); + rar_high = rd32(hw, TXGBE_ETHADDRH); + if (rar_high & TXGBE_ETHADDRH_VLD) { + continue; + } else { + txgbe_set_vmdq(hw, i, l2_tunnel->pool); + rar_high = TXGBE_ETHADDRH_VLD | TXGBE_ETHADDRH_ETAG; + rar_low = l2_tunnel->tunnel_id; + + wr32(hw, TXGBE_ETHADDRL, rar_low); + wr32(hw, TXGBE_ETHADDRH, rar_high); + + return ret; + } + } + + PMD_INIT_LOG(NOTICE, "The table of E-tag forwarding rule is full." 
+ " Please remove a rule before adding a new one."); + return -EINVAL; +} + +static inline struct txgbe_l2_tn_filter * +txgbe_l2_tn_filter_lookup(struct txgbe_l2_tn_info *l2_tn_info, + struct txgbe_l2_tn_key *key) +{ + int ret; + + ret = rte_hash_lookup(l2_tn_info->hash_handle, (const void *)key); + if (ret < 0) + return NULL; + + return l2_tn_info->hash_map[ret]; +} + +static inline int +txgbe_insert_l2_tn_filter(struct txgbe_l2_tn_info *l2_tn_info, + struct txgbe_l2_tn_filter *l2_tn_filter) +{ + int ret; + + ret = rte_hash_add_key(l2_tn_info->hash_handle, + &l2_tn_filter->key); + + if (ret < 0) { + PMD_DRV_LOG(ERR, + "Failed to insert L2 tunnel filter" + " to hash table %d!", + ret); + return ret; + } + + l2_tn_info->hash_map[ret] = l2_tn_filter; + + TAILQ_INSERT_TAIL(&l2_tn_info->l2_tn_list, l2_tn_filter, entries); + + return 0; +} + +static inline int +txgbe_remove_l2_tn_filter(struct txgbe_l2_tn_info *l2_tn_info, + struct txgbe_l2_tn_key *key) +{ + int ret; + struct txgbe_l2_tn_filter *l2_tn_filter; + + ret = rte_hash_del_key(l2_tn_info->hash_handle, key); + + if (ret < 0) { + PMD_DRV_LOG(ERR, + "No such L2 tunnel filter to delete %d!", + ret); + return ret; + } + + l2_tn_filter = l2_tn_info->hash_map[ret]; + l2_tn_info->hash_map[ret] = NULL; + + TAILQ_REMOVE(&l2_tn_info->l2_tn_list, l2_tn_filter, entries); + rte_free(l2_tn_filter); + + return 0; +} + +/* Add l2 tunnel filter */ +int +txgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev, + struct txgbe_l2_tunnel_conf *l2_tunnel, + bool restore) +{ + int ret; + struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev); + struct txgbe_l2_tn_key key; + struct txgbe_l2_tn_filter *node; + + if (!restore) { + key.l2_tn_type = l2_tunnel->l2_tunnel_type; + key.tn_id = l2_tunnel->tunnel_id; + + node = txgbe_l2_tn_filter_lookup(l2_tn_info, &key); + + if (node) { + PMD_DRV_LOG(ERR, + "The L2 tunnel filter already exists!"); + return -EINVAL; + } + + node = rte_zmalloc("txgbe_l2_tn", + sizeof(struct txgbe_l2_tn_filter), + 0); + if (!node) + return -ENOMEM; + + rte_memcpy(&node->key, + &key, + sizeof(struct txgbe_l2_tn_key)); + node->pool = l2_tunnel->pool; + ret = txgbe_insert_l2_tn_filter(l2_tn_info, node); + if (ret < 0) { + rte_free(node); + return ret; + } + } + + switch (l2_tunnel->l2_tunnel_type) { + case RTE_L2_TUNNEL_TYPE_E_TAG: + ret = txgbe_e_tag_filter_add(dev, l2_tunnel); + break; + default: + PMD_DRV_LOG(ERR, "Invalid tunnel type"); + ret = -EINVAL; + break; + } + + if (!restore && ret < 0) + (void)txgbe_remove_l2_tn_filter(l2_tn_info, &key); + + return ret; +} + +/* Delete l2 tunnel filter */ +int +txgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev, + struct txgbe_l2_tunnel_conf *l2_tunnel) +{ + int ret; + struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev); + struct txgbe_l2_tn_key key; + + key.l2_tn_type = l2_tunnel->l2_tunnel_type; + key.tn_id = l2_tunnel->tunnel_id; + ret = txgbe_remove_l2_tn_filter(l2_tn_info, &key); + if (ret < 0) + return ret; + + switch (l2_tunnel->l2_tunnel_type) { + case RTE_L2_TUNNEL_TYPE_E_TAG: + ret = txgbe_e_tag_filter_del(dev, l2_tunnel); + break; + default: + PMD_DRV_LOG(ERR, "Invalid tunnel type"); + ret = -EINVAL; + break; + } + + return ret; +} + static int txgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en) { @@ -4773,12 +4986,29 @@ txgbe_syn_filter_restore(struct rte_eth_dev *dev) } } +/* restore L2 tunnel filter */ +static inline void +txgbe_l2_tn_filter_restore(struct rte_eth_dev *dev) +{ + struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev); + struct 
txgbe_l2_tn_filter *node; + struct txgbe_l2_tunnel_conf l2_tn_conf; + + TAILQ_FOREACH(node, &l2_tn_info->l2_tn_list, entries) { + l2_tn_conf.l2_tunnel_type = node->key.l2_tn_type; + l2_tn_conf.tunnel_id = node->key.tn_id; + l2_tn_conf.pool = node->pool; + (void)txgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_conf, TRUE); + } +} + static int txgbe_filter_restore(struct rte_eth_dev *dev) { txgbe_ntuple_filter_restore(dev); txgbe_ethertype_filter_restore(dev); txgbe_syn_filter_restore(dev); + txgbe_l2_tn_filter_restore(dev); return 0; } diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h index bd4ddab5a..577fc616c 100644 --- a/drivers/net/txgbe/txgbe_ethdev.h +++ b/drivers/net/txgbe/txgbe_ethdev.h @@ -358,6 +358,24 @@ int txgbe_syn_filter_set(struct rte_eth_dev *dev, struct rte_eth_syn_filter *filter, bool add); +/** + * l2 tunnel configuration. + */ +struct txgbe_l2_tunnel_conf { + enum rte_eth_tunnel_type l2_tunnel_type; + uint16_t ether_type; /* ether type in l2 header */ + uint32_t tunnel_id; /* port tag id for e-tag */ + uint16_t vf_id; /* VF id for tag insertion */ + uint32_t pool; /* destination pool for tag based forwarding */ +}; + +int +txgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev, + struct txgbe_l2_tunnel_conf *l2_tunnel, + bool restore); +int +txgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev, + struct txgbe_l2_tunnel_conf *l2_tunnel); void txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction, uint8_t queue, uint8_t msix_vector); From patchwork Tue Nov 3 10:07:53 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiawen Wu X-Patchwork-Id: 83531 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id E3EB7A0521; Tue, 3 Nov 2020 11:10:52 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 4847DC962; Tue, 3 Nov 2020 11:07:19 +0100 (CET) Received: from smtpbgau1.qq.com (smtpbgau1.qq.com [54.206.16.166]) by dpdk.org (Postfix) with ESMTP id 7EBBCC930 for ; Tue, 3 Nov 2020 11:07:13 +0100 (CET) X-QQ-mid: bizesmtp26t1604398027tojvjecx Received: from localhost.localdomain.com (unknown [183.129.236.74]) by esmtp10.qq.com (ESMTP) with id ; Tue, 03 Nov 2020 18:07:06 +0800 (CST) X-QQ-SSF: 01400000002000C0C000B00A0000000 X-QQ-FEAT: YKCDl5A3/aojpO7CS+lA4mrs7xnmwa31KwRcKyDC0IdrAnfFhRCzadxOF3/WM 6FymIZXP0p8xVu+aT6QKdtMorOEgFXclTVsvPlY+jE7D4jVkVoN5RO0UOqBybRjoZho7xVu iO/rJGAvdAhT3Y7RtIqGWc49oKkUJYjFgh6sZejyhybjTTgLV3X9Ak8hziEBJ1SKdMs0dqe HTLOScNZRNwp1Bm4njx6VwOxzR2LPnkMbnV7kVcGn6dV7PFgLvcPooJ7IGQwe7nnU8a8iV0 Q81eas6uH3nwAqHQaW2Cjb191A8WwQ1DPKT8zfI1U3aWrkmGz/1H0QJtp2Drr877tD711Ei P0HtRwv/3YE7AQnnjgQ577Ii/2AtQ== X-QQ-GoodBg: 2 From: Jiawen Wu To: dev@dpdk.org Cc: Jiawen Wu Date: Tue, 3 Nov 2020 18:07:53 +0800 Message-Id: <20201103100818.311881-13-jiawenwu@trustnetic.com> X-Mailer: git-send-email 2.18.4 In-Reply-To: <20201103100818.311881-1-jiawenwu@trustnetic.com> References: <20201103100818.311881-1-jiawenwu@trustnetic.com> X-QQ-SENDSIZE: 520 Feedback-ID: bizesmtp:trustnetic.com:qybgforeign:qybgforeign7 X-QQ-Bgrelay: 1 Subject: [dpdk-dev] [PATCH 12/37] net/txgbe: add L2 tunnel filter parse rule X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , 
Errors-To: dev-bounces@dpdk.org Sender: "dev" Add support to parse flow for L2 tunnel filter. Signed-off-by: Jiawen Wu --- drivers/net/txgbe/txgbe_flow.c | 202 +++++++++++++++++++++++++++++++++ 1 file changed, 202 insertions(+) diff --git a/drivers/net/txgbe/txgbe_flow.c b/drivers/net/txgbe/txgbe_flow.c index 7110e594b..8589e3328 100644 --- a/drivers/net/txgbe/txgbe_flow.c +++ b/drivers/net/txgbe/txgbe_flow.c @@ -15,6 +15,8 @@ #include #include +#include +#include #include #include @@ -1040,3 +1042,203 @@ txgbe_parse_syn_filter(struct rte_eth_dev *dev, return 0; } +/** + * Parse the rule to see if it is a L2 tunnel rule. + * And get the L2 tunnel filter info BTW. + * Only support E-tag now. + * pattern: + * The first not void item can be E_TAG. + * The next not void item must be END. + * action: + * The first not void action should be VF or PF. + * The next not void action should be END. + * pattern example: + * ITEM Spec Mask + * E_TAG grp 0x1 0x3 + e_cid_base 0x309 0xFFF + * END + * other members in mask and spec should set to 0x00. + * item->last should be NULL. + */ +static int +cons_parse_l2_tn_filter(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct txgbe_l2_tunnel_conf *filter, + struct rte_flow_error *error) +{ + const struct rte_flow_item *item; + const struct rte_flow_item_e_tag *e_tag_spec; + const struct rte_flow_item_e_tag *e_tag_mask; + const struct rte_flow_action *act; + const struct rte_flow_action_vf *act_vf; + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + + if (!pattern) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_NUM, + NULL, "NULL pattern."); + return -rte_errno; + } + + if (!actions) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_NUM, + NULL, "NULL action."); + return -rte_errno; + } + + if (!attr) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR, + NULL, "NULL attribute."); + return -rte_errno; + } + + /* The first not void item should be e-tag. */ + item = next_no_void_pattern(pattern, NULL); + if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) { + memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by L2 tunnel filter"); + return -rte_errno; + } + + if (!item->spec || !item->mask) { + memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf)); + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by L2 tunnel filter"); + return -rte_errno; + } + + /*Not supported last point for range*/ + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + + e_tag_spec = item->spec; + e_tag_mask = item->mask; + + /* Only care about GRP and E cid base. */ + if (e_tag_mask->epcp_edei_in_ecid_b || + e_tag_mask->in_ecid_e || + e_tag_mask->ecid_e || + e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) { + memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by L2 tunnel filter"); + return -rte_errno; + } + + filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG; + /** + * grp and e_cid_base are bit fields and only use 14 bits. + * e-tag id is taken as little endian by HW. 
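+ * Only the 14-bit rsvd_grp_ecid_b field (GRP plus E-CID base) is
+ * matched: its mask must be exactly 0x3FFF in big-endian form, and the
+ * value is converted to host order before being stored as tunnel_id.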
+ */ + filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b); + + /* check if the next not void item is END */ + item = next_no_void_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_END) { + memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by L2 tunnel filter"); + return -rte_errno; + } + + /* parse attr */ + /* must be input direction */ + if (!attr->ingress) { + memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, + attr, "Only support ingress."); + return -rte_errno; + } + + /* not supported */ + if (attr->egress) { + memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, + attr, "Not support egress."); + return -rte_errno; + } + + /* not supported */ + if (attr->transfer) { + memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + attr, "No support for transfer."); + return -rte_errno; + } + + /* not supported */ + if (attr->priority) { + memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, + attr, "Not support priority."); + return -rte_errno; + } + + /* check if the first not void action is VF or PF. */ + act = next_no_void_action(actions, NULL); + if (act->type != RTE_FLOW_ACTION_TYPE_VF && + act->type != RTE_FLOW_ACTION_TYPE_PF) { + memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } + + if (act->type == RTE_FLOW_ACTION_TYPE_VF) { + act_vf = (const struct rte_flow_action_vf *)act->conf; + filter->pool = act_vf->id; + } else { + filter->pool = pci_dev->max_vfs; + } + + /* check if the next not void item is END */ + act = next_no_void_action(actions, act); + if (act->type != RTE_FLOW_ACTION_TYPE_END) { + memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } + + return 0; +} + +static int +txgbe_parse_l2_tn_filter(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct txgbe_l2_tunnel_conf *l2_tn_filter, + struct rte_flow_error *error) +{ + int ret = 0; + + ret = cons_parse_l2_tn_filter(dev, attr, pattern, + actions, l2_tn_filter, error); + + memset(l2_tn_filter, 0, sizeof(struct txgbe_l2_tunnel_conf)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "Not supported by L2 tunnel filter"); + ret = -rte_errno; + return ret; +} + From patchwork Tue Nov 3 10:07:54 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiawen Wu X-Patchwork-Id: 83529 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 4EDA9A0521; Tue, 3 Nov 2020 11:10:17 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id C4042C93C; Tue, 3 Nov 2020 11:07:15 +0100 (CET) Received: from smtpproxy21.qq.com (smtpbg704.qq.com [203.205.195.105]) by dpdk.org (Postfix) 
with ESMTP id 30773C91A for ; Tue, 3 Nov 2020 11:07:12 +0100 (CET) X-QQ-mid: bizesmtp26t1604398028tv2rndks Received: from localhost.localdomain.com (unknown [183.129.236.74]) by esmtp10.qq.com (ESMTP) with id ; Tue, 03 Nov 2020 18:07:08 +0800 (CST) X-QQ-SSF: 01400000002000C0C000B00A0000000 X-QQ-FEAT: y3iK4Lsvf4Ayx7k1HIHkO994c79BNDVfQ1m8tB4Wv1oPINCv1Lvgq5l73/Z6i mmgFdUFVv3zvUcWIXxn3rV7hJjhUXYR1cgV0DS1i9OnVP3wGUCLSLPSqyr9JHmVA3dFImwq muoY1X10pcdSwP5G6fWnvssnh3CCOpM4lsW23DOG0mnal6fckU2cApYaeQLT87aJ5Uu2/w1 RKiqUqwhRDzY2+asS69bqgDMg/AupfwTZzBBfHVEuc6qMr5xenroWjVkrzjtMB+VJRC2t30 mY+nOSSBV2y2TqSSW+CcYq4J2LysISoq5+IFyTQlS3THYxbGBf4zZattGqCyX2UQ+xntTYA 3PQYHNl+IzAMN4B6Rw= X-QQ-GoodBg: 2 From: Jiawen Wu To: dev@dpdk.org Cc: Jiawen Wu Date: Tue, 3 Nov 2020 18:07:54 +0800 Message-Id: <20201103100818.311881-14-jiawenwu@trustnetic.com> X-Mailer: git-send-email 2.18.4 In-Reply-To: <20201103100818.311881-1-jiawenwu@trustnetic.com> References: <20201103100818.311881-1-jiawenwu@trustnetic.com> X-QQ-SENDSIZE: 520 Feedback-ID: bizesmtp:trustnetic.com:qybgforeign:qybgforeign7 X-QQ-Bgrelay: 1 Subject: [dpdk-dev] [PATCH 13/37] net/txgbe: add FDIR filter init and uninit. X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Add flow director filter init and uninit operations. Signed-off-by: Jiawen Wu --- drivers/net/txgbe/base/txgbe_type.h | 29 +++++++++++++ drivers/net/txgbe/txgbe_ethdev.c | 63 +++++++++++++++++++++++++++++ drivers/net/txgbe/txgbe_ethdev.h | 51 +++++++++++++++++++++++ 3 files changed, 143 insertions(+) diff --git a/drivers/net/txgbe/base/txgbe_type.h b/drivers/net/txgbe/base/txgbe_type.h index 69aa8993a..b9d31ab83 100644 --- a/drivers/net/txgbe/base/txgbe_type.h +++ b/drivers/net/txgbe/base/txgbe_type.h @@ -67,6 +67,35 @@ enum { #define TXGBE_ATR_HASH_MASK 0x7fff +/* Flow Director ATR input struct. 
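+ * It is the software key of a flow director filter entry; the byte
+ * layout is listed field by field below.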
*/ +struct txgbe_atr_input { + /* + * Byte layout in order, all values with MSB first: + * + * vm_pool - 1 byte + * flow_type - 1 byte + * vlan_id - 2 bytes + * src_ip - 16 bytes + * inner_mac - 6 bytes + * cloud_mode - 2 bytes + * tni_vni - 4 bytes + * dst_ip - 16 bytes + * src_port - 2 bytes + * dst_port - 2 bytes + * flex_bytes - 2 bytes + * bkt_hash - 2 bytes + */ + u8 vm_pool; + u8 flow_type; + __be16 pkt_type; + __be32 dst_ip[4]; + __be32 src_ip[4]; + __be16 src_port; + __be16 dst_port; + __be16 flex_bytes; + __be16 bkt_hash; +}; + enum txgbe_eeprom_type { txgbe_eeprom_unknown = 0, txgbe_eeprom_spi, diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c index 94fa03182..4f4e51fe1 100644 --- a/drivers/net/txgbe/txgbe_ethdev.c +++ b/drivers/net/txgbe/txgbe_ethdev.c @@ -88,6 +88,8 @@ static const struct reg_info *txgbe_regs_others[] = { txgbe_regs_diagnostic, NULL}; +static int txgbe_fdir_filter_init(struct rte_eth_dev *eth_dev); +static int txgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev); static int txgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev); static int txgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev); static int txgbe_dev_set_link_up(struct rte_eth_dev *dev); @@ -690,6 +692,9 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) /* initialize 5tuple filter list */ TAILQ_INIT(&filter_info->fivetuple_list); + /* initialize flow director filter list & hash */ + txgbe_fdir_filter_init(eth_dev); + /* initialize l2 tunnel filter list & hash */ txgbe_l2_tn_filter_init(eth_dev); @@ -729,6 +734,26 @@ static int txgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev) return 0; } +static int txgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev) +{ + struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(eth_dev); + struct txgbe_fdir_filter *fdir_filter; + + if (fdir_info->hash_map) + rte_free(fdir_info->hash_map); + if (fdir_info->hash_handle) + rte_hash_free(fdir_info->hash_handle); + + while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) { + TAILQ_REMOVE(&fdir_info->fdir_list, + fdir_filter, + entries); + rte_free(fdir_filter); + } + + return 0; +} + static int txgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev) { struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(eth_dev); @@ -749,6 +774,41 @@ static int txgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev) return 0; } +static int txgbe_fdir_filter_init(struct rte_eth_dev *eth_dev) +{ + struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(eth_dev); + char fdir_hash_name[RTE_HASH_NAMESIZE]; + struct rte_hash_parameters fdir_hash_params = { + .name = fdir_hash_name, + .entries = TXGBE_MAX_FDIR_FILTER_NUM, + .key_len = sizeof(struct txgbe_atr_input), + .hash_func = rte_hash_crc, + .hash_func_init_val = 0, + .socket_id = rte_socket_id(), + }; + + TAILQ_INIT(&fdir_info->fdir_list); + snprintf(fdir_hash_name, RTE_HASH_NAMESIZE, + "fdir_%s", TDEV_NAME(eth_dev)); + fdir_info->hash_handle = rte_hash_create(&fdir_hash_params); + if (!fdir_info->hash_handle) { + PMD_INIT_LOG(ERR, "Failed to create fdir hash table!"); + return -EINVAL; + } + fdir_info->hash_map = rte_zmalloc("txgbe", + sizeof(struct txgbe_fdir_filter *) * + TXGBE_MAX_FDIR_FILTER_NUM, + 0); + if (!fdir_info->hash_map) { + PMD_INIT_LOG(ERR, + "Failed to allocate memory for fdir hash map!"); + return -ENOMEM; + } + fdir_info->mask_added = FALSE; + + return 0; +} + static int txgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev) { struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(eth_dev); @@ -1866,6 
+1926,9 @@ txgbe_dev_close(struct rte_eth_dev *dev) rte_free(dev->data->hash_mac_addrs); dev->data->hash_mac_addrs = NULL; + /* remove all the fdir filters & hash */ + txgbe_fdir_filter_uninit(dev); + /* remove all the L2 tunnel filters & hash */ txgbe_l2_tn_filter_uninit(dev); diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h index 577fc616c..059b0164b 100644 --- a/drivers/net/txgbe/txgbe_ethdev.h +++ b/drivers/net/txgbe/txgbe_ethdev.h @@ -59,8 +59,55 @@ #define TXGBE_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET #define TXGBE_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET +#define TXGBE_MAX_FDIR_FILTER_NUM (1024 * 32) #define TXGBE_MAX_L2_TN_FILTER_NUM 128 +/* + * Information about the fdir mode. + */ +struct txgbe_hw_fdir_mask { + uint16_t vlan_tci_mask; + uint32_t src_ipv4_mask; + uint32_t dst_ipv4_mask; + uint16_t src_ipv6_mask; + uint16_t dst_ipv6_mask; + uint16_t src_port_mask; + uint16_t dst_port_mask; + uint16_t flex_bytes_mask; + uint8_t mac_addr_byte_mask; + uint32_t tunnel_id_mask; + uint8_t tunnel_type_mask; +}; + +struct txgbe_fdir_filter { + TAILQ_ENTRY(txgbe_fdir_filter) entries; + struct txgbe_atr_input input; /* key of fdir filter*/ + uint32_t fdirflags; /* drop or forward */ + uint32_t fdirhash; /* hash value for fdir */ + uint8_t queue; /* assigned rx queue */ +}; + +/* list of fdir filters */ +TAILQ_HEAD(txgbe_fdir_filter_list, txgbe_fdir_filter); + +struct txgbe_hw_fdir_info { + struct txgbe_hw_fdir_mask mask; + uint8_t flex_bytes_offset; + uint16_t collision; + uint16_t free; + uint16_t maxhash; + uint8_t maxlen; + uint64_t add; + uint64_t remove; + uint64_t f_add; + uint64_t f_remove; + struct txgbe_fdir_filter_list fdir_list; /* filter list*/ + /* store the pointers of the filters, index is the hash value. */ + struct txgbe_fdir_filter **hash_map; + struct rte_hash *hash_handle; /* cuckoo hash handler */ + bool mask_added; /* If already got mask from consistent filter */ +}; + /* structure for interrupt relative data */ struct txgbe_interrupt { uint32_t flags; @@ -206,6 +253,7 @@ struct txgbe_bw_conf { struct txgbe_adapter { struct txgbe_hw hw; struct txgbe_hw_stats stats; + struct txgbe_hw_fdir_info fdir; struct txgbe_interrupt intr; struct txgbe_stat_mappings stat_mappings; struct txgbe_vfta shadow_vfta; @@ -238,6 +286,9 @@ struct txgbe_adapter { #define TXGBE_DEV_INTR(dev) \ (&((struct txgbe_adapter *)(dev)->data->dev_private)->intr) +#define TXGBE_DEV_FDIR(dev) \ + (&((struct txgbe_adapter *)(dev)->data->dev_private)->fdir) + #define TXGBE_DEV_STAT_MAPPINGS(dev) \ (&((struct txgbe_adapter *)(dev)->data->dev_private)->stat_mappings) From patchwork Tue Nov 3 10:07:55 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiawen Wu X-Patchwork-Id: 83532 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id D54B9A0521; Tue, 3 Nov 2020 11:11:34 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 9EEE5C982; Tue, 3 Nov 2020 11:07:22 +0100 (CET) Received: from smtpbgau1.qq.com (smtpbgau1.qq.com [54.206.16.166]) by dpdk.org (Postfix) with ESMTP id 53146C934 for ; Tue, 3 Nov 2020 11:07:14 +0100 (CET) X-QQ-mid: bizesmtp26t1604398029t0jhgvd8 Received: from localhost.localdomain.com (unknown [183.129.236.74]) by esmtp10.qq.com (ESMTP) with id ; Tue, 03 Nov 2020 18:07:09 +0800 
(CST) X-QQ-SSF: 01400000002000C0C000B00A0000000 X-QQ-FEAT: y3iK4Lsvf4DLBFpWOzG1pekwXDZ6Yxlf9PS3uBMjHsh/Ozhe6H2lE2taoqZVr Gxcs2FGpi//OVHIB52cBAax+4hAtmm4TwCkIPbowzZxk2POX1CwB+7sUiH62OeZtjz3oHf4 reYS5uwd6NMgNZY22TDKHSaccGyNKuBJlzJAOHgdv+VqmVyH7tOXlJBD1O8BavOV8g7OS87 Ybuk+No5zx09/8Req9ilabnRZxpOtrvw5T0TXkmPS2YhwNkUqoreBNevOHZejXI8LLCwTE3 PEJd1ZV6ApD+EOooPV75unPBG2ktIe4rD9j8pao0QxvXiuquWLs9cGy23asT+zbE6gXrW0l t+HLza2R4HVjuw6y7PvuMTiMARX3RILYqoMxXRu X-QQ-GoodBg: 2 From: Jiawen Wu To: dev@dpdk.org Cc: Jiawen Wu Date: Tue, 3 Nov 2020 18:07:55 +0800 Message-Id: <20201103100818.311881-15-jiawenwu@trustnetic.com> X-Mailer: git-send-email 2.18.4 In-Reply-To: <20201103100818.311881-1-jiawenwu@trustnetic.com> References: <20201103100818.311881-1-jiawenwu@trustnetic.com> X-QQ-SENDSIZE: 520 Feedback-ID: bizesmtp:trustnetic.com:qybgforeign:qybgforeign5 X-QQ-Bgrelay: 1 Subject: [dpdk-dev] [PATCH 14/37] net/txgbe: configure FDIR filter X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Configure flow director filter with it enabled. Signed-off-by: Jiawen Wu --- drivers/net/txgbe/base/txgbe_type.h | 6 + drivers/net/txgbe/meson.build | 1 + drivers/net/txgbe/txgbe_ethdev.c | 6 + drivers/net/txgbe/txgbe_ethdev.h | 6 + drivers/net/txgbe/txgbe_fdir.c | 407 ++++++++++++++++++++++++++++ 5 files changed, 426 insertions(+) create mode 100644 drivers/net/txgbe/txgbe_fdir.c diff --git a/drivers/net/txgbe/base/txgbe_type.h b/drivers/net/txgbe/base/txgbe_type.h index b9d31ab83..633692cd7 100644 --- a/drivers/net/txgbe/base/txgbe_type.h +++ b/drivers/net/txgbe/base/txgbe_type.h @@ -21,6 +21,8 @@ #define TXGBE_MAX_QP (128) #define TXGBE_MAX_UTA 128 +#define TXGBE_FDIR_INIT_DONE_POLL 10 + #define TXGBE_ALIGN 128 /* as intel did */ #include "txgbe_status.h" @@ -65,6 +67,10 @@ enum { #define TXGBE_PHYSICAL_LAYER_10BASE_T 0x08000 #define TXGBE_PHYSICAL_LAYER_2500BASE_KX 0x10000 +/* Software ATR hash keys */ +#define TXGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2 +#define TXGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614 + #define TXGBE_ATR_HASH_MASK 0x7fff /* Flow Director ATR input struct. 
*/ diff --git a/drivers/net/txgbe/meson.build b/drivers/net/txgbe/meson.build index 45379175d..bb1683631 100644 --- a/drivers/net/txgbe/meson.build +++ b/drivers/net/txgbe/meson.build @@ -6,6 +6,7 @@ objs = [base_objs] sources = files( 'txgbe_ethdev.c', + 'txgbe_fdir.c', 'txgbe_flow.c', 'txgbe_ptypes.c', 'txgbe_pf.c', diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c index 4f4e51fe1..df2efe80f 100644 --- a/drivers/net/txgbe/txgbe_ethdev.c +++ b/drivers/net/txgbe/txgbe_ethdev.c @@ -1633,6 +1633,12 @@ txgbe_dev_start(struct rte_eth_dev *dev) txgbe_configure_port(dev); txgbe_configure_dcb(dev); + if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) { + err = txgbe_fdir_configure(dev); + if (err) + goto error; + } + /* Restore vf rate limit */ if (vfinfo != NULL) { for (vf = 0; vf < pci_dev->max_vfs; vf++) diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h index 059b0164b..5a5d9c4d3 100644 --- a/drivers/net/txgbe/txgbe_ethdev.h +++ b/drivers/net/txgbe/txgbe_ethdev.h @@ -13,6 +13,7 @@ #include #include #include +#include /* need update link, bit flag */ #define TXGBE_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0) @@ -430,6 +431,11 @@ txgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev, void txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction, uint8_t queue, uint8_t msix_vector); +/* + * Flow director function prototypes + */ +int txgbe_fdir_configure(struct rte_eth_dev *dev); +int txgbe_fdir_set_input_mask(struct rte_eth_dev *dev); void txgbe_configure_pb(struct rte_eth_dev *dev); void txgbe_configure_port(struct rte_eth_dev *dev); void txgbe_configure_dcb(struct rte_eth_dev *dev); diff --git a/drivers/net/txgbe/txgbe_fdir.c b/drivers/net/txgbe/txgbe_fdir.c new file mode 100644 index 000000000..df6125d4a --- /dev/null +++ b/drivers/net/txgbe/txgbe_fdir.c @@ -0,0 +1,407 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2015-2020 + */ + +#include +#include +#include +#include +#include + + +#include "txgbe_logs.h" +#include "base/txgbe.h" +#include "txgbe_ethdev.h" + +#define TXGBE_DEFAULT_FLEXBYTES_OFFSET 12 /*default flexbytes offset in bytes*/ +#define TXGBE_MAX_FLX_SOURCE_OFF 62 + +#define IPV6_ADDR_TO_MASK(ipaddr, ipv6m) do { \ + uint8_t ipv6_addr[16]; \ + uint8_t i; \ + rte_memcpy(ipv6_addr, (ipaddr), sizeof(ipv6_addr));\ + (ipv6m) = 0; \ + for (i = 0; i < sizeof(ipv6_addr); i++) { \ + if (ipv6_addr[i] == UINT8_MAX) \ + (ipv6m) |= 1 << i; \ + else if (ipv6_addr[i] != 0) { \ + PMD_DRV_LOG(ERR, " invalid IPv6 address mask."); \ + return -EINVAL; \ + } \ + } \ +} while (0) + +#define IPV6_MASK_TO_ADDR(ipv6m, ipaddr) do { \ + uint8_t ipv6_addr[16]; \ + uint8_t i; \ + for (i = 0; i < sizeof(ipv6_addr); i++) { \ + if ((ipv6m) & (1 << i)) \ + ipv6_addr[i] = UINT8_MAX; \ + else \ + ipv6_addr[i] = 0; \ + } \ + rte_memcpy((ipaddr), ipv6_addr, sizeof(ipv6_addr));\ +} while (0) + +/** + * Initialize Flow Director control registers + * @hw: pointer to hardware structure + * @fdirctrl: value to write to flow director control register + **/ +static int +txgbe_fdir_enable(struct txgbe_hw *hw, uint32_t fdirctrl) +{ + int i; + + PMD_INIT_FUNC_TRACE(); + + /* Prime the keys for hashing */ + wr32(hw, TXGBE_FDIRBKTHKEY, TXGBE_ATR_BUCKET_HASH_KEY); + wr32(hw, TXGBE_FDIRSIGHKEY, TXGBE_ATR_SIGNATURE_HASH_KEY); + + /* + * Continue setup of fdirctrl register bits: + * Set the maximum length per hash bucket to 0xA filters + * Send interrupt when 64 filters are left + */ + fdirctrl |= TXGBE_FDIRCTL_MAXLEN(0xA) | + 
TXGBE_FDIRCTL_FULLTHR(4); + + /* + * Poll init-done after we write the register. Estimated times: + * 10G: PBALLOC = 11b, timing is 60us + * 1G: PBALLOC = 11b, timing is 600us + * 100M: PBALLOC = 11b, timing is 6ms + * + * Multiple these timings by 4 if under full Rx load + * + * So we'll poll for TXGBE_FDIR_INIT_DONE_POLL times, sleeping for + * 1 msec per poll time. If we're at line rate and drop to 100M, then + * this might not finish in our poll time, but we can live with that + * for now. + */ + wr32(hw, TXGBE_FDIRCTL, fdirctrl); + txgbe_flush(hw); + for (i = 0; i < TXGBE_FDIR_INIT_DONE_POLL; i++) { + if (rd32(hw, TXGBE_FDIRCTL) & TXGBE_FDIRCTL_INITDONE) + break; + msec_delay(1); + } + + if (i >= TXGBE_FDIR_INIT_DONE_POLL) { + PMD_INIT_LOG(ERR, "Flow Director poll time exceeded during enabling!"); + return -ETIMEDOUT; + } + return 0; +} + +/* + * Set appropriate bits in fdirctrl for: variable reporting levels, moving + * flexbytes matching field, and drop queue (only for perfect matching mode). + */ +static inline int +configure_fdir_flags(const struct rte_fdir_conf *conf, + uint32_t *fdirctrl, uint32_t *flex) +{ + *fdirctrl = 0; + *flex = 0; + + switch (conf->pballoc) { + case RTE_FDIR_PBALLOC_64K: + /* 8k - 1 signature filters */ + *fdirctrl |= TXGBE_FDIRCTL_BUF_64K; + break; + case RTE_FDIR_PBALLOC_128K: + /* 16k - 1 signature filters */ + *fdirctrl |= TXGBE_FDIRCTL_BUF_128K; + break; + case RTE_FDIR_PBALLOC_256K: + /* 32k - 1 signature filters */ + *fdirctrl |= TXGBE_FDIRCTL_BUF_256K; + break; + default: + /* bad value */ + PMD_INIT_LOG(ERR, "Invalid fdir_conf->pballoc value"); + return -EINVAL; + }; + + /* status flags: write hash & swindex in the rx descriptor */ + switch (conf->status) { + case RTE_FDIR_NO_REPORT_STATUS: + /* do nothing, default mode */ + break; + case RTE_FDIR_REPORT_STATUS: + /* report status when the packet matches a fdir rule */ + *fdirctrl |= TXGBE_FDIRCTL_REPORT_MATCH; + break; + case RTE_FDIR_REPORT_STATUS_ALWAYS: + /* always report status */ + *fdirctrl |= TXGBE_FDIRCTL_REPORT_ALWAYS; + break; + default: + /* bad value */ + PMD_INIT_LOG(ERR, "Invalid fdir_conf->status value"); + return -EINVAL; + }; + + *flex |= TXGBE_FDIRFLEXCFG_BASE_MAC; + *flex |= TXGBE_FDIRFLEXCFG_OFST(TXGBE_DEFAULT_FLEXBYTES_OFFSET / 2); + + switch (conf->mode) { + case RTE_FDIR_MODE_SIGNATURE: + break; + case RTE_FDIR_MODE_PERFECT: + *fdirctrl |= TXGBE_FDIRCTL_PERFECT; + *fdirctrl |= TXGBE_FDIRCTL_DROPQP(conf->drop_queue); + break; + default: + /* bad value */ + PMD_INIT_LOG(ERR, "Invalid fdir_conf->mode value"); + return -EINVAL; + } + + return 0; +} + +static inline uint32_t +reverse_fdir_bmks(uint16_t hi_dword, uint16_t lo_dword) +{ + uint32_t mask = hi_dword << 16; + + mask |= lo_dword; + mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1); + mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2); + mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4); + return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8); +} + +int +txgbe_fdir_set_input_mask(struct rte_eth_dev *dev) +{ + struct txgbe_hw *hw = TXGBE_DEV_HW(dev); + struct txgbe_hw_fdir_info *info = TXGBE_DEV_FDIR(dev); + enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode; + /* + * mask VM pool and DIPv6 since there are currently not supported + * mask FLEX byte, it will be set in flex_conf + */ + uint32_t fdirm = TXGBE_FDIRMSK_POOL; + uint32_t fdirtcpm; /* TCP source and destination port masks. */ + uint32_t fdiripv6m; /* IPv6 source and destination masks. 
*/ + + PMD_INIT_FUNC_TRACE(); + + if (mode != RTE_FDIR_MODE_SIGNATURE && + mode != RTE_FDIR_MODE_PERFECT) { + PMD_DRV_LOG(ERR, "Not supported fdir mode - %d!", mode); + return -ENOTSUP; + } + + /* + * Program the relevant mask registers. If src/dst_port or src/dst_addr + * are zero, then assume a full mask for that field. Also assume that + * a VLAN of 0 is unspecified, so mask that out as well. L4type + * cannot be masked out in this implementation. + */ + if (info->mask.dst_port_mask == 0 && info->mask.src_port_mask == 0) { + /* use the L4 protocol mask for raw IPv4/IPv6 traffic */ + fdirm |= TXGBE_FDIRMSK_L4P; + } + + /* TBD: don't support encapsulation yet */ + wr32(hw, TXGBE_FDIRMSK, fdirm); + + /* store the TCP/UDP port masks, bit reversed from port layout */ + fdirtcpm = reverse_fdir_bmks(rte_be_to_cpu_16(info->mask.dst_port_mask), + rte_be_to_cpu_16(info->mask.src_port_mask)); + + /* write all the same so that UDP, TCP and SCTP use the same mask + * (little-endian) + */ + wr32(hw, TXGBE_FDIRTCPMSK, ~fdirtcpm); + wr32(hw, TXGBE_FDIRUDPMSK, ~fdirtcpm); + wr32(hw, TXGBE_FDIRSCTPMSK, ~fdirtcpm); + + /* Store source and destination IPv4 masks (big-endian) */ + wr32(hw, TXGBE_FDIRSIP4MSK, ~info->mask.src_ipv4_mask); + wr32(hw, TXGBE_FDIRDIP4MSK, ~info->mask.dst_ipv4_mask); + + if (mode == RTE_FDIR_MODE_SIGNATURE) { + /* + * Store source and destination IPv6 masks (bit reversed) + */ + fdiripv6m = TXGBE_FDIRIP6MSK_DST(info->mask.dst_ipv6_mask) | + TXGBE_FDIRIP6MSK_SRC(info->mask.src_ipv6_mask); + + wr32(hw, TXGBE_FDIRIP6MSK, ~fdiripv6m); + } + + return 0; +} + +static int +txgbe_fdir_store_input_mask(struct rte_eth_dev *dev) +{ + struct rte_eth_fdir_masks *input_mask = + &dev->data->dev_conf.fdir_conf.mask; + enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode; + struct txgbe_hw_fdir_info *info = TXGBE_DEV_FDIR(dev); + uint16_t dst_ipv6m = 0; + uint16_t src_ipv6m = 0; + + if (mode != RTE_FDIR_MODE_SIGNATURE && + mode != RTE_FDIR_MODE_PERFECT) { + PMD_DRV_LOG(ERR, "Not supported fdir mode - %d!", mode); + return -ENOTSUP; + } + + memset(&info->mask, 0, sizeof(struct txgbe_hw_fdir_mask)); + info->mask.vlan_tci_mask = input_mask->vlan_tci_mask; + info->mask.src_port_mask = input_mask->src_port_mask; + info->mask.dst_port_mask = input_mask->dst_port_mask; + info->mask.src_ipv4_mask = input_mask->ipv4_mask.src_ip; + info->mask.dst_ipv4_mask = input_mask->ipv4_mask.dst_ip; + IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.src_ip, src_ipv6m); + IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.dst_ip, dst_ipv6m); + info->mask.src_ipv6_mask = src_ipv6m; + info->mask.dst_ipv6_mask = dst_ipv6m; + + return 0; +} + +/* + * txgbe_check_fdir_flex_conf -check if the flex payload and mask configuration + * arguments are valid + */ +static int +txgbe_set_fdir_flex_conf(struct rte_eth_dev *dev, uint32_t flex) +{ + const struct rte_eth_fdir_flex_conf *conf = + &dev->data->dev_conf.fdir_conf.flex_conf; + struct txgbe_hw *hw = TXGBE_DEV_HW(dev); + struct txgbe_hw_fdir_info *info = TXGBE_DEV_FDIR(dev); + const struct rte_eth_flex_payload_cfg *flex_cfg; + const struct rte_eth_fdir_flex_mask *flex_mask; + uint16_t flexbytes = 0; + uint16_t i; + + if (conf == NULL) { + PMD_DRV_LOG(ERR, "NULL pointer."); + return -EINVAL; + } + + flex |= TXGBE_FDIRFLEXCFG_DIA; + + for (i = 0; i < conf->nb_payloads; i++) { + flex_cfg = &conf->flex_set[i]; + if (flex_cfg->type != RTE_ETH_RAW_PAYLOAD) { + PMD_DRV_LOG(ERR, "unsupported payload type."); + return -EINVAL; + } + if (((flex_cfg->src_offset[0] & 0x1) == 0) && + 
(flex_cfg->src_offset[1] == flex_cfg->src_offset[0] + 1) && + flex_cfg->src_offset[0] <= TXGBE_MAX_FLX_SOURCE_OFF) { + flex &= ~TXGBE_FDIRFLEXCFG_OFST_MASK; + flex |= + TXGBE_FDIRFLEXCFG_OFST(flex_cfg->src_offset[0] / 2); + } else { + PMD_DRV_LOG(ERR, "invalid flexbytes arguments."); + return -EINVAL; + } + } + + for (i = 0; i < conf->nb_flexmasks; i++) { + flex_mask = &conf->flex_mask[i]; + if (flex_mask->flow_type != RTE_ETH_FLOW_UNKNOWN) { + PMD_DRV_LOG(ERR, "flexmask should be set globally."); + return -EINVAL; + } + flexbytes = (uint16_t)(((flex_mask->mask[1] << 8) & 0xFF00) | + ((flex_mask->mask[0]) & 0xFF)); + if (flexbytes == UINT16_MAX) { + flex &= ~TXGBE_FDIRFLEXCFG_DIA; + } else if (flexbytes != 0) { + /* TXGBE_FDIRFLEXCFG_DIA is set by default when set mask */ + PMD_DRV_LOG(ERR, " invalid flexbytes mask arguments."); + return -EINVAL; + } + } + + info->mask.flex_bytes_mask = flexbytes ? UINT16_MAX : 0; + info->flex_bytes_offset = (uint8_t)(TXGBD_FDIRFLEXCFG_OFST(flex) * 2); + + for (i = 0; i < 64; i++) { + uint32_t flexreg; + flexreg = rd32(hw, TXGBE_FDIRFLEXCFG(i / 4)); + flexreg &= ~(TXGBE_FDIRFLEXCFG_ALL(~0UL, i % 4)); + flexreg |= TXGBE_FDIRFLEXCFG_ALL(flex, i % 4); + wr32(hw, TXGBE_FDIRFLEXCFG(i / 4), flexreg); + } + return 0; +} + +int +txgbe_fdir_configure(struct rte_eth_dev *dev) +{ + struct txgbe_hw *hw = TXGBE_DEV_HW(dev); + int err; + uint32_t fdirctrl, flex, pbsize; + int i; + enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode; + + PMD_INIT_FUNC_TRACE(); + + /* supports mac-vlan and tunnel mode */ + if (mode != RTE_FDIR_MODE_SIGNATURE && + mode != RTE_FDIR_MODE_PERFECT) + return -ENOSYS; + + err = configure_fdir_flags(&dev->data->dev_conf.fdir_conf, + &fdirctrl, &flex); + if (err) + return err; + + /* + * Before enabling Flow Director, the Rx Packet Buffer size + * must be reduced. The new value is the current size minus + * flow director memory usage size. + */ + pbsize = rd32(hw, TXGBE_PBRXSIZE(0)); + pbsize -= TXGBD_FDIRCTL_BUF_BYTE(fdirctrl); + wr32(hw, TXGBE_PBRXSIZE(0), pbsize); + + /* + * The defaults in the HW for RX PB 1-7 are not zero and so should be + * initialized to zero for non DCB mode otherwise actual total RX PB + * would be bigger than programmed and filter space would run into + * the PB 0 region. 
+ */ + for (i = 1; i < 8; i++) + wr32(hw, TXGBE_PBRXSIZE(i), 0); + + err = txgbe_fdir_store_input_mask(dev); + if (err < 0) { + PMD_INIT_LOG(ERR, " Error on setting FD mask"); + return err; + } + + err = txgbe_fdir_set_input_mask(dev); + if (err < 0) { + PMD_INIT_LOG(ERR, " Error on setting FD mask"); + return err; + } + + err = txgbe_set_fdir_flex_conf(dev, flex); + if (err < 0) { + PMD_INIT_LOG(ERR, " Error on setting FD flexible arguments."); + return err; + } + + err = txgbe_fdir_enable(hw, fdirctrl); + if (err < 0) { + PMD_INIT_LOG(ERR, " Error on enabling FD."); + return err; + } + return 0; +} + From patchwork Tue Nov 3 10:07:56 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiawen Wu X-Patchwork-Id: 83534 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id EB90AA0521; Tue, 3 Nov 2020 11:12:10 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id EB69EC9B4; Tue, 3 Nov 2020 11:07:24 +0100 (CET) Received: from smtpbgsg2.qq.com (smtpbgsg2.qq.com [54.254.200.128]) by dpdk.org (Postfix) with ESMTP id D1634C93E for ; Tue, 3 Nov 2020 11:07:14 +0100 (CET) X-QQ-mid: bizesmtp26t1604398031thcrisdl Received: from localhost.localdomain.com (unknown [183.129.236.74]) by esmtp10.qq.com (ESMTP) with id ; Tue, 03 Nov 2020 18:07:10 +0800 (CST) X-QQ-SSF: 01400000002000C0C000B00A0000000 X-QQ-FEAT: 1G/RtVmKuRW4WyTq3BAPiK2zcS1mxpjjDKSq/VEZOU51Jqu4tZgNziaIIHs3/ u06WzkwD0xvFwyKgXjnykgqJg5kfLpHyMQpFq1na4ptPHd8QsP4aHSMW2Y/nJv3YeoLsZY8 Zp4aUmz5ZBTGbysVHGZzX6ijIZOQWo4uX/3FScI0jTNI7R6XoCDSBR4RnuBG1SLPYV9HwHr qw9TBvlxKLG1Y+U+6p2RT4UdSoPGRVgJqwiklG2a6tMwa9Qk6jf2VMLSXgQNMf4CUyRBE6g /iZg+YvPbSrYU/EsrjzcEoLGbaNG55BqMTvHnRU/lIEjS96lhAn009X9Gzzj0EWs0yoBBTF velt2MvvOS8y4liocLBSaBtddCcJw== X-QQ-GoodBg: 2 From: Jiawen Wu To: dev@dpdk.org Cc: Jiawen Wu Date: Tue, 3 Nov 2020 18:07:56 +0800 Message-Id: <20201103100818.311881-16-jiawenwu@trustnetic.com> X-Mailer: git-send-email 2.18.4 In-Reply-To: <20201103100818.311881-1-jiawenwu@trustnetic.com> References: <20201103100818.311881-1-jiawenwu@trustnetic.com> X-QQ-SENDSIZE: 520 Feedback-ID: bizesmtp:trustnetic.com:qybgforeign:qybgforeign7 X-QQ-Bgrelay: 1 Subject: [dpdk-dev] [PATCH 15/37] net/txgbe: support FDIR add and delete operations X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Support add and delete operations on flow director. 
Signed-off-by: Jiawen Wu --- drivers/net/txgbe/base/txgbe_type.h | 11 + drivers/net/txgbe/txgbe_ethdev.h | 16 + drivers/net/txgbe/txgbe_fdir.c | 472 +++++++++++++++++++++++++++- 3 files changed, 498 insertions(+), 1 deletion(-) diff --git a/drivers/net/txgbe/base/txgbe_type.h b/drivers/net/txgbe/base/txgbe_type.h index 633692cd7..160d5253a 100644 --- a/drivers/net/txgbe/base/txgbe_type.h +++ b/drivers/net/txgbe/base/txgbe_type.h @@ -22,6 +22,7 @@ #define TXGBE_MAX_UTA 128 #define TXGBE_FDIR_INIT_DONE_POLL 10 +#define TXGBE_FDIRCMD_CMD_POLL 10 #define TXGBE_ALIGN 128 /* as intel did */ @@ -71,7 +72,17 @@ enum { #define TXGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2 #define TXGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614 +/* Software ATR input stream values and masks */ #define TXGBE_ATR_HASH_MASK 0x7fff +#define TXGBE_ATR_L3TYPE_MASK 0x4 +#define TXGBE_ATR_L3TYPE_IPV4 0x0 +#define TXGBE_ATR_L3TYPE_IPV6 0x4 +#define TXGBE_ATR_L4TYPE_MASK 0x3 +#define TXGBE_ATR_L4TYPE_UDP 0x1 +#define TXGBE_ATR_L4TYPE_TCP 0x2 +#define TXGBE_ATR_L4TYPE_SCTP 0x3 +#define TXGBE_ATR_TUNNEL_MASK 0x10 +#define TXGBE_ATR_TUNNEL_ANY 0x10 /* Flow Director ATR input struct. */ struct txgbe_atr_input { diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h index 5a5d9c4d3..5e7fd0a86 100644 --- a/drivers/net/txgbe/txgbe_ethdev.h +++ b/drivers/net/txgbe/txgbe_ethdev.h @@ -91,6 +91,18 @@ struct txgbe_fdir_filter { /* list of fdir filters */ TAILQ_HEAD(txgbe_fdir_filter_list, txgbe_fdir_filter); +struct txgbe_fdir_rule { + struct txgbe_hw_fdir_mask mask; + struct txgbe_atr_input input; /* key of fdir filter */ + bool b_spec; /* If TRUE, input, fdirflags, queue have meaning. */ + bool b_mask; /* If TRUE, mask has meaning. */ + enum rte_fdir_mode mode; /* IP, MAC VLAN, Tunnel */ + uint32_t fdirflags; /* drop or forward */ + uint32_t soft_id; /* an unique value for this rule */ + uint8_t queue; /* assigned rx queue */ + uint8_t flex_bytes_offset; +}; + struct txgbe_hw_fdir_info { struct txgbe_hw_fdir_mask mask; uint8_t flex_bytes_offset; @@ -436,6 +448,10 @@ void txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction, */ int txgbe_fdir_configure(struct rte_eth_dev *dev); int txgbe_fdir_set_input_mask(struct rte_eth_dev *dev); +int txgbe_fdir_filter_program(struct rte_eth_dev *dev, + struct txgbe_fdir_rule *rule, + bool del, bool update); + void txgbe_configure_pb(struct rte_eth_dev *dev); void txgbe_configure_port(struct rte_eth_dev *dev); void txgbe_configure_dcb(struct rte_eth_dev *dev); diff --git a/drivers/net/txgbe/txgbe_fdir.c b/drivers/net/txgbe/txgbe_fdir.c index df6125d4a..d38e21e9e 100644 --- a/drivers/net/txgbe/txgbe_fdir.c +++ b/drivers/net/txgbe/txgbe_fdir.c @@ -7,7 +7,7 @@ #include #include #include - +#include #include "txgbe_logs.h" #include "base/txgbe.h" @@ -15,6 +15,7 @@ #define TXGBE_DEFAULT_FLEXBYTES_OFFSET 12 /*default flexbytes offset in bytes*/ #define TXGBE_MAX_FLX_SOURCE_OFF 62 +#define TXGBE_FDIRCMD_CMD_INTERVAL_US 10 #define IPV6_ADDR_TO_MASK(ipaddr, ipv6m) do { \ uint8_t ipv6_addr[16]; \ @@ -405,3 +406,472 @@ txgbe_fdir_configure(struct rte_eth_dev *dev) return 0; } +/* + * Note that the bkt_hash field in the txgbe_atr_input structure is also never + * set. 
+ * + * Compute the hashes for SW ATR + * @stream: input bitstream to compute the hash on + * @key: 32-bit hash key + **/ +static uint32_t +txgbe_atr_compute_hash(struct txgbe_atr_input *atr_input, + uint32_t key) +{ + /* + * The algorithm is as follows: + * Hash[15:0] = Sum { S[n] x K[n+16] }, n = 0...350 + * where Sum {A[n]}, n = 0...n is bitwise XOR of A[0], A[1]...A[n] + * and A[n] x B[n] is bitwise AND between same length strings + * + * K[n] is 16 bits, defined as: + * for n modulo 32 >= 15, K[n] = K[n % 32 : (n % 32) - 15] + * for n modulo 32 < 15, K[n] = + * K[(n % 32:0) | (31:31 - (14 - (n % 32)))] + * + * S[n] is 16 bits, defined as: + * for n >= 15, S[n] = S[n:n - 15] + * for n < 15, S[n] = S[(n:0) | (350:350 - (14 - n))] + * + * To simplify for programming, the algorithm is implemented + * in software this way: + * + * key[31:0], hi_hash_dword[31:0], lo_hash_dword[31:0], hash[15:0] + * + * for (i = 0; i < 352; i+=32) + * hi_hash_dword[31:0] ^= Stream[(i+31):i]; + * + * lo_hash_dword[15:0] ^= Stream[15:0]; + * lo_hash_dword[15:0] ^= hi_hash_dword[31:16]; + * lo_hash_dword[31:16] ^= hi_hash_dword[15:0]; + * + * hi_hash_dword[31:0] ^= Stream[351:320]; + * + * if (key[0]) + * hash[15:0] ^= Stream[15:0]; + * + * for (i = 0; i < 16; i++) { + * if (key[i]) + * hash[15:0] ^= lo_hash_dword[(i+15):i]; + * if (key[i + 16]) + * hash[15:0] ^= hi_hash_dword[(i+15):i]; + * } + * + */ + __be32 *dword_stream = (__be32 *)atr_input; + __be32 common_hash_dword = 0; + u32 hi_hash_dword, lo_hash_dword, flow_pool_ptid; + u32 hash_result = 0; + u8 i; + + /* record the flow_vm_vlan bits as they are a key part to the hash */ + flow_pool_ptid = be_to_cpu32(dword_stream[0]); + + /* generate common hash dword */ + for (i = 1; i <= 10; i++) + common_hash_dword ^= dword_stream[i]; + + hi_hash_dword = be_to_cpu32(common_hash_dword); + + /* low dword is word swapped version of common */ + lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); + + /* apply (Flow ID/VM Pool/Packet Type) bits to hash words */ + hi_hash_dword ^= flow_pool_ptid ^ (flow_pool_ptid >> 16); + + /* Process bits 0 and 16 */ + if (key & 0x0001) + hash_result ^= lo_hash_dword; + if (key & 0x00010000) + hash_result ^= hi_hash_dword; + + /* + * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to + * delay this because bit 0 of the stream should not be processed + * so we do not add the vlan until after bit 0 was processed + */ + lo_hash_dword ^= flow_pool_ptid ^ (flow_pool_ptid << 16); + + /* process the remaining 30 bits in the key 2 bits at a time */ + for (i = 15; i; i--) { + if (key & (0x0001 << i)) + hash_result ^= lo_hash_dword >> i; + if (key & (0x00010000 << i)) + hash_result ^= hi_hash_dword >> i; + } + + return hash_result; +} + +static uint32_t +atr_compute_perfect_hash(struct txgbe_atr_input *input, + enum rte_fdir_pballoc_type pballoc) +{ + uint32_t bucket_hash; + + bucket_hash = txgbe_atr_compute_hash(input, + TXGBE_ATR_BUCKET_HASH_KEY); + if (pballoc == RTE_FDIR_PBALLOC_256K) + bucket_hash &= PERFECT_BUCKET_256KB_HASH_MASK; + else if (pballoc == RTE_FDIR_PBALLOC_128K) + bucket_hash &= PERFECT_BUCKET_128KB_HASH_MASK; + else + bucket_hash &= PERFECT_BUCKET_64KB_HASH_MASK; + + return TXGBE_FDIRPIHASH_BKT(bucket_hash); +} + +/** + * txgbe_fdir_check_cmd_complete - poll to check whether FDIRPICMD is complete + * @hw: pointer to hardware structure + */ +static inline int +txgbe_fdir_check_cmd_complete(struct txgbe_hw *hw, uint32_t *fdircmd) +{ + int i; + + for (i = 0; i < TXGBE_FDIRCMD_CMD_POLL; i++) { + *fdircmd 
= rd32(hw, TXGBE_FDIRPICMD); + if (!(*fdircmd & TXGBE_FDIRPICMD_OP_MASK)) + return 0; + rte_delay_us(TXGBE_FDIRCMD_CMD_INTERVAL_US); + } + + return -ETIMEDOUT; +} + +/* + * Calculate the hash value needed for signature-match filters. In the FreeBSD + * driver, this is done by the optimised function + * txgbe_atr_compute_sig_hash_raptor(). However that can't be used here as it + * doesn't support calculating a hash for an IPv6 filter. + */ +static uint32_t +atr_compute_signature_hash(struct txgbe_atr_input *input, + enum rte_fdir_pballoc_type pballoc) +{ + uint32_t bucket_hash, sig_hash; + + bucket_hash = txgbe_atr_compute_hash(input, + TXGBE_ATR_BUCKET_HASH_KEY); + if (pballoc == RTE_FDIR_PBALLOC_256K) + bucket_hash &= SIG_BUCKET_256KB_HASH_MASK; + else if (pballoc == RTE_FDIR_PBALLOC_128K) + bucket_hash &= SIG_BUCKET_128KB_HASH_MASK; + else + bucket_hash &= SIG_BUCKET_64KB_HASH_MASK; + + sig_hash = txgbe_atr_compute_hash(input, + TXGBE_ATR_SIGNATURE_HASH_KEY); + + return TXGBE_FDIRPIHASH_SIG(sig_hash) | + TXGBE_FDIRPIHASH_BKT(bucket_hash); +} + +/** + * With the ability to set extra flags in FDIRPICMD register + * added, and IPv6 support also added. The hash value is also pre-calculated + * as the pballoc value is needed to do it. + */ +static int +fdir_write_perfect_filter(struct txgbe_hw *hw, + struct txgbe_atr_input *input, uint8_t queue, + uint32_t fdircmd, uint32_t fdirhash, + enum rte_fdir_mode mode) +{ + uint32_t fdirport, fdirflex; + int err = 0; + + UNREFERENCED_PARAMETER(mode); + + /* record the IPv4 address (little-endian) + * can not use wr32. + */ + wr32(hw, TXGBE_FDIRPISIP4, be_to_le32(input->src_ip[0])); + wr32(hw, TXGBE_FDIRPIDIP4, be_to_le32(input->dst_ip[0])); + + /* record source and destination port (little-endian)*/ + fdirport = TXGBE_FDIRPIPORT_DST(be_to_le16(input->dst_port)); + fdirport |= TXGBE_FDIRPIPORT_SRC(be_to_le16(input->src_port)); + wr32(hw, TXGBE_FDIRPIPORT, fdirport); + + /* record pkt_type (little-endian) and flex_bytes(big-endian) */ + fdirflex = TXGBE_FDIRPIFLEX_FLEX(be_to_npu16(input->flex_bytes)); + fdirflex |= TXGBE_FDIRPIFLEX_PTYPE(be_to_le16(input->pkt_type)); + wr32(hw, TXGBE_FDIRPIFLEX, fdirflex); + + /* configure FDIRHASH register */ + fdirhash |= TXGBE_FDIRPIHASH_VLD; + wr32(hw, TXGBE_FDIRPIHASH, fdirhash); + + /* + * flush all previous writes to make certain registers are + * programmed prior to issuing the command + */ + txgbe_flush(hw); + + /* configure FDIRPICMD register */ + fdircmd |= TXGBE_FDIRPICMD_OP_ADD | + TXGBE_FDIRPICMD_UPD | + TXGBE_FDIRPICMD_LAST | + TXGBE_FDIRPICMD_QPENA; + fdircmd |= TXGBE_FDIRPICMD_FT(input->flow_type); + fdircmd |= TXGBE_FDIRPICMD_QP(queue); + fdircmd |= TXGBE_FDIRPICMD_POOL(input->vm_pool); + + wr32(hw, TXGBE_FDIRPICMD, fdircmd); + + PMD_DRV_LOG(DEBUG, "Rx Queue=%x hash=%x", queue, fdirhash); + + err = txgbe_fdir_check_cmd_complete(hw, &fdircmd); + if (err < 0) + PMD_DRV_LOG(ERR, "Timeout writing flow director filter."); + + return err; +} + +/** + * This function supports setting extra fields in the FDIRPICMD register, and + * removes the code that was verifying the flow_type field. According to the + * documentation, a flow type of 00 (i.e. not TCP, UDP, or SCTP) is not + * supported, however it appears to work ok... 
+ * Adds a signature hash filter + * @hw: pointer to hardware structure + * @input: unique input dword + * @queue: queue index to direct traffic to + * @fdircmd: any extra flags to set in fdircmd register + * @fdirhash: pre-calculated hash value for the filter + **/ +static int +fdir_add_signature_filter(struct txgbe_hw *hw, + struct txgbe_atr_input *input, uint8_t queue, uint32_t fdircmd, + uint32_t fdirhash) +{ + int err = 0; + + PMD_INIT_FUNC_TRACE(); + + /* configure FDIRPICMD register */ + fdircmd |= TXGBE_FDIRPICMD_OP_ADD | + TXGBE_FDIRPICMD_UPD | + TXGBE_FDIRPICMD_LAST | + TXGBE_FDIRPICMD_QPENA; + fdircmd |= TXGBE_FDIRPICMD_FT(input->flow_type); + fdircmd |= TXGBE_FDIRPICMD_QP(queue); + + fdirhash |= TXGBE_FDIRPIHASH_VLD; + wr32(hw, TXGBE_FDIRPIHASH, fdirhash); + wr32(hw, TXGBE_FDIRPICMD, fdircmd); + + PMD_DRV_LOG(DEBUG, "Rx Queue=%x hash=%x", queue, fdirhash); + + err = txgbe_fdir_check_cmd_complete(hw, &fdircmd); + if (err < 0) + PMD_DRV_LOG(ERR, "Timeout writing flow director filter."); + + return err; +} + +/* + * This is modified to take in the hash as a parameter so that + * it can be used for removing signature and perfect filters. + */ +static int +fdir_erase_filter_raptor(struct txgbe_hw *hw, uint32_t fdirhash) +{ + uint32_t fdircmd = 0; + int err = 0; + + wr32(hw, TXGBE_FDIRPIHASH, fdirhash); + + /* flush hash to HW */ + txgbe_flush(hw); + + /* Query if filter is present */ + wr32(hw, TXGBE_FDIRPICMD, TXGBE_FDIRPICMD_OP_QRY); + + err = txgbe_fdir_check_cmd_complete(hw, &fdircmd); + if (err < 0) { + PMD_INIT_LOG(ERR, "Timeout querying for flow director filter."); + return err; + } + + /* if filter exists in hardware then remove it */ + if (fdircmd & TXGBE_FDIRPICMD_VLD) { + wr32(hw, TXGBE_FDIRPIHASH, fdirhash); + txgbe_flush(hw); + wr32(hw, TXGBE_FDIRPICMD, TXGBE_FDIRPICMD_OP_REM); + } + + err = txgbe_fdir_check_cmd_complete(hw, &fdircmd); + if (err < 0) + PMD_INIT_LOG(ERR, "Timeout erasing flow director filter."); + + return err; +} + +static inline struct txgbe_fdir_filter * +txgbe_fdir_filter_lookup(struct txgbe_hw_fdir_info *fdir_info, + struct txgbe_atr_input *input) +{ + int ret; + + ret = rte_hash_lookup(fdir_info->hash_handle, (const void *)input); + if (ret < 0) + return NULL; + + return fdir_info->hash_map[ret]; +} + +static inline int +txgbe_insert_fdir_filter(struct txgbe_hw_fdir_info *fdir_info, + struct txgbe_fdir_filter *fdir_filter) +{ + int ret; + + ret = rte_hash_add_key(fdir_info->hash_handle, &fdir_filter->input); + if (ret < 0) { + PMD_DRV_LOG(ERR, + "Failed to insert fdir filter to hash table %d!", + ret); + return ret; + } + + fdir_info->hash_map[ret] = fdir_filter; + + TAILQ_INSERT_TAIL(&fdir_info->fdir_list, fdir_filter, entries); + + return 0; +} + +static inline int +txgbe_remove_fdir_filter(struct txgbe_hw_fdir_info *fdir_info, + struct txgbe_atr_input *input) +{ + int ret; + struct txgbe_fdir_filter *fdir_filter; + + ret = rte_hash_del_key(fdir_info->hash_handle, input); + if (ret < 0) + return ret; + + fdir_filter = fdir_info->hash_map[ret]; + fdir_info->hash_map[ret] = NULL; + + TAILQ_REMOVE(&fdir_info->fdir_list, fdir_filter, entries); + rte_free(fdir_filter); + + return 0; +} + +int +txgbe_fdir_filter_program(struct rte_eth_dev *dev, + struct txgbe_fdir_rule *rule, + bool del, + bool update) +{ + struct txgbe_hw *hw = TXGBE_DEV_HW(dev); + uint32_t fdirhash; + uint8_t queue; + bool is_perfect = FALSE; + int err; + struct txgbe_hw_fdir_info *info = TXGBE_DEV_FDIR(dev); + enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode; + 
struct txgbe_fdir_filter *node; + + if (fdir_mode == RTE_FDIR_MODE_NONE || + fdir_mode != rule->mode) + return -ENOTSUP; + + if (fdir_mode >= RTE_FDIR_MODE_PERFECT) + is_perfect = TRUE; + + if (is_perfect) { + if (rule->input.flow_type & TXGBE_ATR_L3TYPE_IPV6) { + PMD_DRV_LOG(ERR, "IPv6 is not supported in" + " perfect mode!"); + return -ENOTSUP; + } + fdirhash = atr_compute_perfect_hash(&rule->input, + dev->data->dev_conf.fdir_conf.pballoc); + fdirhash |= TXGBE_FDIRPIHASH_IDX(rule->soft_id); + } else { + fdirhash = atr_compute_signature_hash(&rule->input, + dev->data->dev_conf.fdir_conf.pballoc); + } + + if (del) { + err = txgbe_remove_fdir_filter(info, &rule->input); + if (err < 0) { + PMD_DRV_LOG(ERR, + "No such fdir filter to delete %d!", err); + return err; + } + + err = fdir_erase_filter_raptor(hw, fdirhash); + if (err < 0) + PMD_DRV_LOG(ERR, "Fail to delete FDIR filter!"); + else + PMD_DRV_LOG(DEBUG, "Success to delete FDIR filter!"); + return err; + } + + /* add or update an fdir filter*/ + if (rule->fdirflags & TXGBE_FDIRPICMD_DROP) { + if (!is_perfect) { + PMD_DRV_LOG(ERR, "Drop option is not supported in" + " signature mode."); + return -EINVAL; + } + queue = dev->data->dev_conf.fdir_conf.drop_queue; + } else if (rule->queue < TXGBE_MAX_RX_QUEUE_NUM) { + queue = rule->queue; + } else { + return -EINVAL; + } + + node = txgbe_fdir_filter_lookup(info, &rule->input); + if (node) { + if (!update) { + PMD_DRV_LOG(ERR, "Conflict with existing fdir filter!"); + return -EINVAL; + } + node->fdirflags = rule->fdirflags; + node->fdirhash = fdirhash; + node->queue = queue; + } else { + node = rte_zmalloc("txgbe_fdir", + sizeof(struct txgbe_fdir_filter), 0); + if (!node) + return -ENOMEM; + rte_memcpy(&node->input, &rule->input, + sizeof(struct txgbe_atr_input)); + node->fdirflags = rule->fdirflags; + node->fdirhash = fdirhash; + node->queue = queue; + + err = txgbe_insert_fdir_filter(info, node); + if (err < 0) { + rte_free(node); + return err; + } + } + + if (is_perfect) + err = fdir_write_perfect_filter(hw, &node->input, + node->queue, node->fdirflags, + node->fdirhash, fdir_mode); + else + err = fdir_add_signature_filter(hw, &node->input, + node->queue, node->fdirflags, + node->fdirhash); + if (err < 0) { + PMD_DRV_LOG(ERR, "Fail to add FDIR filter!"); + txgbe_remove_fdir_filter(info, &rule->input); + } else { + PMD_DRV_LOG(DEBUG, "Success to add FDIR filter"); + } + + return err; +} + From patchwork Tue Nov 3 10:07:57 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiawen Wu X-Patchwork-Id: 83539 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id D2651A0521; Tue, 3 Nov 2020 11:13:55 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 8EAB3C9FC; Tue, 3 Nov 2020 11:07:32 +0100 (CET) Received: from smtpbg511.qq.com (smtpbg511.qq.com [203.205.250.109]) by dpdk.org (Postfix) with ESMTP id 3DAAFC960 for ; Tue, 3 Nov 2020 11:07:19 +0100 (CET) X-QQ-mid: bizesmtp26t1604398032tvkgziqq Received: from localhost.localdomain.com (unknown [183.129.236.74]) by esmtp10.qq.com (ESMTP) with id ; Tue, 03 Nov 2020 18:07:11 +0800 (CST) X-QQ-SSF: 01400000002000C0C000B00A0000000 X-QQ-FEAT: Z+6pZKFuMyV2JfN+H10+iNXdWcQqOlMUpYh7Mwnq2wYN1sciiYjNQ4hiWcWUX 
e1Lo8lR5jaDhHyWshbV0Kg3/r5tsNg8AWWMHrBXVXXzJlinKKO9byI+/UojPHQVX6nNW92A tzCm3ud1TQbYniFDR5Q1sKzjsc9pANjd/Leq7EY9KEWxdUK1zLErUaQz9eVxKEwdCrQ3XWi dWKWbd7pC6RmB0c1SbjAd4C73vemVSSo+UqTVoZ5CWPs7PuuC75ju4Yg4uCEFL/DWtrSNL5 /jwsHA605OLDnuqeJ8Hcd/1fXuY3XeMrYpp5rxSXjFJJskRW57whf46f/kqLz1qtubOX0Q/ 9rD4Y7OJSpyofz2j5w= X-QQ-GoodBg: 2 From: Jiawen Wu To: dev@dpdk.org Cc: Jiawen Wu Date: Tue, 3 Nov 2020 18:07:57 +0800 Message-Id: <20201103100818.311881-17-jiawenwu@trustnetic.com> X-Mailer: git-send-email 2.18.4 In-Reply-To: <20201103100818.311881-1-jiawenwu@trustnetic.com> References: <20201103100818.311881-1-jiawenwu@trustnetic.com> X-QQ-SENDSIZE: 520 Feedback-ID: bizesmtp:trustnetic.com:qybgforeign:qybgforeign7 X-QQ-Bgrelay: 1 Subject: [dpdk-dev] [PATCH 16/37] net/txgbe: add FDIR parse normal rule X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Add support to parse flow for fdir filter. Signed-off-by: Jiawen Wu --- drivers/net/txgbe/base/txgbe_type.h | 10 + drivers/net/txgbe/txgbe_flow.c | 856 ++++++++++++++++++++++++++++ 2 files changed, 866 insertions(+) diff --git a/drivers/net/txgbe/base/txgbe_type.h b/drivers/net/txgbe/base/txgbe_type.h index 160d5253a..a73f66d39 100644 --- a/drivers/net/txgbe/base/txgbe_type.h +++ b/drivers/net/txgbe/base/txgbe_type.h @@ -83,6 +83,16 @@ enum { #define TXGBE_ATR_L4TYPE_SCTP 0x3 #define TXGBE_ATR_TUNNEL_MASK 0x10 #define TXGBE_ATR_TUNNEL_ANY 0x10 +enum txgbe_atr_flow_type { + TXGBE_ATR_FLOW_TYPE_IPV4 = 0x0, + TXGBE_ATR_FLOW_TYPE_UDPV4 = 0x1, + TXGBE_ATR_FLOW_TYPE_TCPV4 = 0x2, + TXGBE_ATR_FLOW_TYPE_SCTPV4 = 0x3, + TXGBE_ATR_FLOW_TYPE_IPV6 = 0x4, + TXGBE_ATR_FLOW_TYPE_UDPV6 = 0x5, + TXGBE_ATR_FLOW_TYPE_TCPV6 = 0x6, + TXGBE_ATR_FLOW_TYPE_SCTPV6 = 0x7, +}; /* Flow Director ATR input struct. */ struct txgbe_atr_input { diff --git a/drivers/net/txgbe/txgbe_flow.c b/drivers/net/txgbe/txgbe_flow.c index 8589e3328..ba1be9f12 100644 --- a/drivers/net/txgbe/txgbe_flow.c +++ b/drivers/net/txgbe/txgbe_flow.c @@ -27,6 +27,7 @@ #define TXGBE_MIN_N_TUPLE_PRIO 1 #define TXGBE_MAX_N_TUPLE_PRIO 7 +#define TXGBE_MAX_FLX_SOURCE_OFF 62 /** * Endless loop will never happen with below assumption @@ -1242,3 +1243,858 @@ txgbe_parse_l2_tn_filter(struct rte_eth_dev *dev, return ret; } +/* Parse to get the attr and action info of flow director rule. 
*/ +static int +txgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr, + const struct rte_flow_action actions[], + struct txgbe_fdir_rule *rule, + struct rte_flow_error *error) +{ + const struct rte_flow_action *act; + const struct rte_flow_action_queue *act_q; + const struct rte_flow_action_mark *mark; + + /* parse attr */ + /* must be input direction */ + if (!attr->ingress) { + memset(rule, 0, sizeof(struct txgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, + attr, "Only support ingress."); + return -rte_errno; + } + + /* not supported */ + if (attr->egress) { + memset(rule, 0, sizeof(struct txgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, + attr, "Not support egress."); + return -rte_errno; + } + + /* not supported */ + if (attr->transfer) { + memset(rule, 0, sizeof(struct txgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + attr, "No support for transfer."); + return -rte_errno; + } + + /* not supported */ + if (attr->priority) { + memset(rule, 0, sizeof(struct txgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, + attr, "Not support priority."); + return -rte_errno; + } + + /* check if the first not void action is QUEUE or DROP. */ + act = next_no_void_action(actions, NULL); + if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE && + act->type != RTE_FLOW_ACTION_TYPE_DROP) { + memset(rule, 0, sizeof(struct txgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } + + if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) { + act_q = (const struct rte_flow_action_queue *)act->conf; + rule->queue = act_q->index; + } else { /* drop */ + /* signature mode does not support drop action. 
*/ + if (rule->mode == RTE_FDIR_MODE_SIGNATURE) { + memset(rule, 0, sizeof(struct txgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } + rule->fdirflags = TXGBE_FDIRPICMD_DROP; + } + + /* check if the next not void item is MARK */ + act = next_no_void_action(actions, act); + if (act->type != RTE_FLOW_ACTION_TYPE_MARK && + act->type != RTE_FLOW_ACTION_TYPE_END) { + memset(rule, 0, sizeof(struct txgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } + + rule->soft_id = 0; + + if (act->type == RTE_FLOW_ACTION_TYPE_MARK) { + mark = (const struct rte_flow_action_mark *)act->conf; + rule->soft_id = mark->id; + act = next_no_void_action(actions, act); + } + + /* check if the next not void item is END */ + if (act->type != RTE_FLOW_ACTION_TYPE_END) { + memset(rule, 0, sizeof(struct txgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } + + return 0; +} + +/* search next no void pattern and skip fuzzy */ +static inline +const struct rte_flow_item *next_no_fuzzy_pattern( + const struct rte_flow_item pattern[], + const struct rte_flow_item *cur) +{ + const struct rte_flow_item *next = + next_no_void_pattern(pattern, cur); + while (1) { + if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY) + return next; + next = next_no_void_pattern(pattern, next); + } +} + +static inline uint8_t signature_match(const struct rte_flow_item pattern[]) +{ + const struct rte_flow_item_fuzzy *spec, *last, *mask; + const struct rte_flow_item *item; + uint32_t sh, lh, mh; + int i = 0; + + while (1) { + item = pattern + i; + if (item->type == RTE_FLOW_ITEM_TYPE_END) + break; + + if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) { + spec = item->spec; + last = item->last; + mask = item->mask; + + if (!spec || !mask) + return 0; + + sh = spec->thresh; + + if (!last) + lh = sh; + else + lh = last->thresh; + + mh = mask->thresh; + sh = sh & mh; + lh = lh & mh; + + if (!sh || sh > lh) + return 0; + + return 1; + } + + i++; + } + + return 0; +} + +/** + * Parse the rule to see if it is a IP or MAC VLAN flow director rule. + * And get the flow director filter info BTW. + * UDP/TCP/SCTP PATTERN: + * The first not void item can be ETH or IPV4 or IPV6 + * The second not void item must be IPV4 or IPV6 if the first one is ETH. + * The next not void item could be UDP or TCP or SCTP (optional) + * The next not void item could be RAW (for flexbyte, optional) + * The next not void item must be END. + * A Fuzzy Match pattern can appear at any place before END. + * Fuzzy Match is optional for IPV4 but is required for IPV6 + * MAC VLAN PATTERN: + * The first not void item must be ETH. + * The second not void item must be MAC VLAN. + * The next not void item must be END. + * ACTION: + * The first not void action should be QUEUE or DROP. + * The second not void optional action should be MARK, + * mark_id is a uint32_t number. + * The next not void action should be END. 
+ * UDP/TCP/SCTP pattern example: + * ITEM Spec Mask + * ETH NULL NULL + * IPV4 src_addr 192.168.1.20 0xFFFFFFFF + * dst_addr 192.167.3.50 0xFFFFFFFF + * UDP/TCP/SCTP src_port 80 0xFFFF + * dst_port 80 0xFFFF + * FLEX relative 0 0x1 + * search 0 0x1 + * reserved 0 0 + * offset 12 0xFFFFFFFF + * limit 0 0xFFFF + * length 2 0xFFFF + * pattern[0] 0x86 0xFF + * pattern[1] 0xDD 0xFF + * END + * MAC VLAN pattern example: + * ITEM Spec Mask + * ETH dst_addr + {0xAC, 0x7B, 0xA1, {0xFF, 0xFF, 0xFF, + 0x2C, 0x6D, 0x36} 0xFF, 0xFF, 0xFF} + * MAC VLAN tci 0x2016 0xEFFF + * END + * Other members in mask and spec should set to 0x00. + * Item->last should be NULL. + */ +static int +txgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev __rte_unused, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct txgbe_fdir_rule *rule, + struct rte_flow_error *error) +{ + const struct rte_flow_item *item; + const struct rte_flow_item_eth *eth_mask; + const struct rte_flow_item_ipv4 *ipv4_spec; + const struct rte_flow_item_ipv4 *ipv4_mask; + const struct rte_flow_item_ipv6 *ipv6_spec; + const struct rte_flow_item_ipv6 *ipv6_mask; + const struct rte_flow_item_tcp *tcp_spec; + const struct rte_flow_item_tcp *tcp_mask; + const struct rte_flow_item_udp *udp_spec; + const struct rte_flow_item_udp *udp_mask; + const struct rte_flow_item_sctp *sctp_spec; + const struct rte_flow_item_sctp *sctp_mask; + const struct rte_flow_item_raw *raw_mask; + const struct rte_flow_item_raw *raw_spec; + u32 ptype = 0; + uint8_t j; + + if (!pattern) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_NUM, + NULL, "NULL pattern."); + return -rte_errno; + } + + if (!actions) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_NUM, + NULL, "NULL action."); + return -rte_errno; + } + + if (!attr) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR, + NULL, "NULL attribute."); + return -rte_errno; + } + + /** + * Some fields may not be provided. Set spec to 0 and mask to default + * value. So, we need not do anything for the not provided fields later. + */ + memset(rule, 0, sizeof(struct txgbe_fdir_rule)); + memset(&rule->mask, 0xFF, sizeof(struct txgbe_hw_fdir_mask)); + rule->mask.vlan_tci_mask = 0; + rule->mask.flex_bytes_mask = 0; + + /** + * The first not void item should be + * MAC or IPv4 or TCP or UDP or SCTP. + */ + item = next_no_fuzzy_pattern(pattern, NULL); + if (item->type != RTE_FLOW_ITEM_TYPE_ETH && + item->type != RTE_FLOW_ITEM_TYPE_IPV4 && + item->type != RTE_FLOW_ITEM_TYPE_IPV6 && + item->type != RTE_FLOW_ITEM_TYPE_TCP && + item->type != RTE_FLOW_ITEM_TYPE_UDP && + item->type != RTE_FLOW_ITEM_TYPE_SCTP) { + memset(rule, 0, sizeof(struct txgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + + if (signature_match(pattern)) + rule->mode = RTE_FDIR_MODE_SIGNATURE; + else + rule->mode = RTE_FDIR_MODE_PERFECT; + + /*Not supported last point for range*/ + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + + /* Get the MAC info. */ + if (item->type == RTE_FLOW_ITEM_TYPE_ETH) { + /** + * Only support vlan and dst MAC address, + * others should be masked. 
+ */ + if (item->spec && !item->mask) { + memset(rule, 0, sizeof(struct txgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + + if (item->mask) { + rule->b_mask = TRUE; + eth_mask = item->mask; + + /* Ether type should be masked. */ + if (eth_mask->type || + rule->mode == RTE_FDIR_MODE_SIGNATURE) { + memset(rule, 0, sizeof(struct txgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + + /* If ethernet has meaning, it means MAC VLAN mode. */ + rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN; + + /** + * src MAC address must be masked, + * and don't support dst MAC address mask. + */ + for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) { + if (eth_mask->src.addr_bytes[j] || + eth_mask->dst.addr_bytes[j] != 0xFF) { + memset(rule, 0, + sizeof(struct txgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + } + + /* When no VLAN, considered as full mask. */ + rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF); + } + /*** If both spec and mask are item, + * it means don't care about ETH. + * Do nothing. + */ + + /** + * Check if the next not void item is vlan or ipv4. + * IPv6 is not supported. + */ + item = next_no_fuzzy_pattern(pattern, item); + if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) { + if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) { + memset(rule, 0, sizeof(struct txgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + } else { + if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 && + item->type != RTE_FLOW_ITEM_TYPE_VLAN) { + memset(rule, 0, sizeof(struct txgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + } + } + + /* Get the IPV4 info. */ + if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) { + /** + * Set the flow type even if there's no content + * as we must have a flow type. + */ + rule->input.flow_type = TXGBE_ATR_FLOW_TYPE_IPV4; + ptype = txgbe_ptype_table[TXGBE_PT_IPV4]; + /*Not supported last point for range*/ + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + /** + * Only care about src & dst addresses, + * others should be masked. 
+ */ + if (!item->mask) { + memset(rule, 0, sizeof(struct txgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + rule->b_mask = TRUE; + ipv4_mask = item->mask; + if (ipv4_mask->hdr.version_ihl || + ipv4_mask->hdr.type_of_service || + ipv4_mask->hdr.total_length || + ipv4_mask->hdr.packet_id || + ipv4_mask->hdr.fragment_offset || + ipv4_mask->hdr.time_to_live || + ipv4_mask->hdr.next_proto_id || + ipv4_mask->hdr.hdr_checksum) { + memset(rule, 0, sizeof(struct txgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr; + rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr; + + if (item->spec) { + rule->b_spec = TRUE; + ipv4_spec = item->spec; + rule->input.dst_ip[0] = + ipv4_spec->hdr.dst_addr; + rule->input.src_ip[0] = + ipv4_spec->hdr.src_addr; + } + + /** + * Check if the next not void item is + * TCP or UDP or SCTP or END. + */ + item = next_no_fuzzy_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_TCP && + item->type != RTE_FLOW_ITEM_TYPE_UDP && + item->type != RTE_FLOW_ITEM_TYPE_SCTP && + item->type != RTE_FLOW_ITEM_TYPE_END && + item->type != RTE_FLOW_ITEM_TYPE_RAW) { + memset(rule, 0, sizeof(struct txgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + } + + /* Get the IPV6 info. */ + if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) { + /** + * Set the flow type even if there's no content + * as we must have a flow type. + */ + rule->input.flow_type = TXGBE_ATR_FLOW_TYPE_IPV6; + ptype = txgbe_ptype_table[TXGBE_PT_IPV6]; + + /** + * 1. must signature match + * 2. not support last + * 3. mask must not null + */ + if (rule->mode != RTE_FDIR_MODE_SIGNATURE || + item->last || + !item->mask) { + memset(rule, 0, sizeof(struct txgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + + rule->b_mask = TRUE; + ipv6_mask = item->mask; + if (ipv6_mask->hdr.vtc_flow || + ipv6_mask->hdr.payload_len || + ipv6_mask->hdr.proto || + ipv6_mask->hdr.hop_limits) { + memset(rule, 0, sizeof(struct txgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + + /* check src addr mask */ + for (j = 0; j < 16; j++) { + if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) { + rule->mask.src_ipv6_mask |= 1 << j; + } else if (ipv6_mask->hdr.src_addr[j] != 0) { + memset(rule, 0, sizeof(struct txgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + } + + /* check dst addr mask */ + for (j = 0; j < 16; j++) { + if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) { + rule->mask.dst_ipv6_mask |= 1 << j; + } else if (ipv6_mask->hdr.dst_addr[j] != 0) { + memset(rule, 0, sizeof(struct txgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + } + + if (item->spec) { + rule->b_spec = TRUE; + ipv6_spec = item->spec; + rte_memcpy(rule->input.src_ip, + ipv6_spec->hdr.src_addr, 16); + rte_memcpy(rule->input.dst_ip, + ipv6_spec->hdr.dst_addr, 16); + } + + /** + * Check if the next not void item is + * TCP or UDP or SCTP or END. 
+ */ + item = next_no_fuzzy_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_TCP && + item->type != RTE_FLOW_ITEM_TYPE_UDP && + item->type != RTE_FLOW_ITEM_TYPE_SCTP && + item->type != RTE_FLOW_ITEM_TYPE_END && + item->type != RTE_FLOW_ITEM_TYPE_RAW) { + memset(rule, 0, sizeof(struct txgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + } + + /* Get the TCP info. */ + if (item->type == RTE_FLOW_ITEM_TYPE_TCP) { + /** + * Set the flow type even if there's no content + * as we must have a flow type. + */ + rule->input.flow_type |= TXGBE_ATR_L4TYPE_TCP; + ptype = txgbe_ptype_table[TXGBE_PT_IPV4_TCP]; + /*Not supported last point for range*/ + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + /** + * Only care about src & dst ports, + * others should be masked. + */ + if (!item->mask) { + memset(rule, 0, sizeof(struct txgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + rule->b_mask = TRUE; + tcp_mask = item->mask; + if (tcp_mask->hdr.sent_seq || + tcp_mask->hdr.recv_ack || + tcp_mask->hdr.data_off || + tcp_mask->hdr.tcp_flags || + tcp_mask->hdr.rx_win || + tcp_mask->hdr.cksum || + tcp_mask->hdr.tcp_urp) { + memset(rule, 0, sizeof(struct txgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + rule->mask.src_port_mask = tcp_mask->hdr.src_port; + rule->mask.dst_port_mask = tcp_mask->hdr.dst_port; + + if (item->spec) { + rule->b_spec = TRUE; + tcp_spec = item->spec; + rule->input.src_port = + tcp_spec->hdr.src_port; + rule->input.dst_port = + tcp_spec->hdr.dst_port; + } + + item = next_no_fuzzy_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_RAW && + item->type != RTE_FLOW_ITEM_TYPE_END) { + memset(rule, 0, sizeof(struct txgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + } + + /* Get the UDP info */ + if (item->type == RTE_FLOW_ITEM_TYPE_UDP) { + /** + * Set the flow type even if there's no content + * as we must have a flow type. + */ + rule->input.flow_type |= TXGBE_ATR_L4TYPE_UDP; + ptype = txgbe_ptype_table[TXGBE_PT_IPV4_UDP]; + /*Not supported last point for range*/ + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + /** + * Only care about src & dst ports, + * others should be masked. 
+ */ + if (!item->mask) { + memset(rule, 0, sizeof(struct txgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + rule->b_mask = TRUE; + udp_mask = item->mask; + if (udp_mask->hdr.dgram_len || + udp_mask->hdr.dgram_cksum) { + memset(rule, 0, sizeof(struct txgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + rule->mask.src_port_mask = udp_mask->hdr.src_port; + rule->mask.dst_port_mask = udp_mask->hdr.dst_port; + + if (item->spec) { + rule->b_spec = TRUE; + udp_spec = item->spec; + rule->input.src_port = + udp_spec->hdr.src_port; + rule->input.dst_port = + udp_spec->hdr.dst_port; + } + + item = next_no_fuzzy_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_RAW && + item->type != RTE_FLOW_ITEM_TYPE_END) { + memset(rule, 0, sizeof(struct txgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + } + + /* Get the SCTP info */ + if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) { + /** + * Set the flow type even if there's no content + * as we must have a flow type. + */ + rule->input.flow_type |= TXGBE_ATR_L4TYPE_SCTP; + ptype = txgbe_ptype_table[TXGBE_PT_IPV4_SCTP]; + /*Not supported last point for range*/ + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + + /** + * Only care about src & dst ports, + * others should be masked. + */ + if (!item->mask) { + memset(rule, 0, sizeof(struct txgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + rule->b_mask = TRUE; + sctp_mask = item->mask; + if (sctp_mask->hdr.tag || + sctp_mask->hdr.cksum) { + memset(rule, 0, sizeof(struct txgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + rule->mask.src_port_mask = sctp_mask->hdr.src_port; + rule->mask.dst_port_mask = sctp_mask->hdr.dst_port; + + if (item->spec) { + rule->b_spec = TRUE; + sctp_spec = item->spec; + rule->input.src_port = + sctp_spec->hdr.src_port; + rule->input.dst_port = + sctp_spec->hdr.dst_port; + } + /* others even sctp port is not supported */ + sctp_mask = item->mask; + if (sctp_mask && + (sctp_mask->hdr.src_port || + sctp_mask->hdr.dst_port || + sctp_mask->hdr.tag || + sctp_mask->hdr.cksum)) { + memset(rule, 0, sizeof(struct txgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + + item = next_no_fuzzy_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_RAW && + item->type != RTE_FLOW_ITEM_TYPE_END) { + memset(rule, 0, sizeof(struct txgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + } + + /* Get the flex byte info */ + if (item->type == RTE_FLOW_ITEM_TYPE_RAW) { + /* Not supported last point for range*/ + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + /* mask should not be null */ + if (!item->mask || !item->spec) { + memset(rule, 0, sizeof(struct txgbe_fdir_rule)); + 
rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + + raw_mask = item->mask; + + /* check mask */ + if (raw_mask->relative != 0x1 || + raw_mask->search != 0x1 || + raw_mask->reserved != 0x0 || + (uint32_t)raw_mask->offset != 0xffffffff || + raw_mask->limit != 0xffff || + raw_mask->length != 0xffff) { + memset(rule, 0, sizeof(struct txgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + + raw_spec = item->spec; + + /* check spec */ + if (raw_spec->relative != 0 || + raw_spec->search != 0 || + raw_spec->reserved != 0 || + raw_spec->offset > TXGBE_MAX_FLX_SOURCE_OFF || + raw_spec->offset % 2 || + raw_spec->limit != 0 || + raw_spec->length != 2 || + /* pattern can't be 0xffff */ + (raw_spec->pattern[0] == 0xff && + raw_spec->pattern[1] == 0xff)) { + memset(rule, 0, sizeof(struct txgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + + /* check pattern mask */ + if (raw_mask->pattern[0] != 0xff || + raw_mask->pattern[1] != 0xff) { + memset(rule, 0, sizeof(struct txgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + + rule->mask.flex_bytes_mask = 0xffff; + rule->input.flex_bytes = + (((uint16_t)raw_spec->pattern[1]) << 8) | + raw_spec->pattern[0]; + rule->flex_bytes_offset = raw_spec->offset; + } + + if (item->type != RTE_FLOW_ITEM_TYPE_END) { + /* check if the next not void item is END */ + item = next_no_fuzzy_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_END) { + memset(rule, 0, sizeof(struct txgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + } + + rule->input.pkt_type = cpu_to_be16(txgbe_encode_ptype(ptype)); + + return txgbe_parse_fdir_act_attr(attr, actions, rule, error); +} + +static int +txgbe_parse_fdir_filter(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct txgbe_fdir_rule *rule, + struct rte_flow_error *error) +{ + int ret; + struct txgbe_hw *hw = TXGBE_DEV_HW(dev); + enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode; + + ret = txgbe_parse_fdir_filter_normal(dev, attr, pattern, + actions, rule, error); + if (!ret) + goto step_next; + +step_next: + + if (hw->mac.type == txgbe_mac_raptor && + rule->fdirflags == TXGBE_FDIRPICMD_DROP && + (rule->input.src_port != 0 || rule->input.dst_port != 0)) + return -ENOTSUP; + + if (fdir_mode == RTE_FDIR_MODE_NONE || + fdir_mode != rule->mode) + return -ENOTSUP; + + if (rule->queue >= dev->data->nb_rx_queues) + return -ENOTSUP; + + return ret; +} + From patchwork Tue Nov 3 10:07:58 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiawen Wu X-Patchwork-Id: 83537 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id B4059A0521; Tue, 3 Nov 2020 11:13:14 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id E8C63C9E6; Tue, 3 Nov 2020 11:07:29 +0100 (CET) Received: from 
smtpbgeu2.qq.com (smtpbgeu2.qq.com [18.194.254.142]) by dpdk.org (Postfix) with ESMTP id A92DDC7FE for ; Tue, 3 Nov 2020 11:07:17 +0100 (CET) X-QQ-mid: bizesmtp26t1604398033tu40is09 Received: from localhost.localdomain.com (unknown [183.129.236.74]) by esmtp10.qq.com (ESMTP) with id ; Tue, 03 Nov 2020 18:07:13 +0800 (CST) X-QQ-SSF: 01400000002000C0C000B00A0000000 X-QQ-FEAT: e2WfOmmCKKYXW1Y++uWYr5hZhJEJ+oWhO6UMcKWJdtdkeYscYiJpoqsk3Ygdj GFkGJkDCs8fSqqsrpDYLEgBBei1u+DG9olq6kyNLC/BEqjBn3dyuL9ENX13JUPsrcJYQE8E 3TRFtGfJR9isPtNFCO0P2FF2mr9jvr5E7YdAGeb7OutpCExyHmiWTwJrGOyB8Eol3026D9K GjZtmOn73RxgcJV+7jHSMOb04nkjSQuinuDYL7J+oR8DyimZIhyAXorU+J05vDdD8LLh5SI xmAznYceh3GcEeJgxdUJiO8HJ7KkiUfmDSSESMl/WISUPkqf7jWgKnyRFTj3yE8K+U/gT2W GmGgv0G93mFFCOyiNk= X-QQ-GoodBg: 2 From: Jiawen Wu To: dev@dpdk.org Cc: Jiawen Wu Date: Tue, 3 Nov 2020 18:07:58 +0800 Message-Id: <20201103100818.311881-18-jiawenwu@trustnetic.com> X-Mailer: git-send-email 2.18.4 In-Reply-To: <20201103100818.311881-1-jiawenwu@trustnetic.com> References: <20201103100818.311881-1-jiawenwu@trustnetic.com> X-QQ-SENDSIZE: 520 Feedback-ID: bizesmtp:trustnetic.com:qybgforeign:qybgforeign7 X-QQ-Bgrelay: 1 Subject: [dpdk-dev] [PATCH 17/37] net/txgbe: add FDIR parse tunnel rule X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Add support to parse tunnel flow for fdir filter. Signed-off-by: Jiawen Wu --- drivers/net/txgbe/base/txgbe_type.h | 8 + drivers/net/txgbe/txgbe_flow.c | 290 ++++++++++++++++++++++++++++ 2 files changed, 298 insertions(+) diff --git a/drivers/net/txgbe/base/txgbe_type.h b/drivers/net/txgbe/base/txgbe_type.h index a73f66d39..22efcef78 100644 --- a/drivers/net/txgbe/base/txgbe_type.h +++ b/drivers/net/txgbe/base/txgbe_type.h @@ -92,6 +92,14 @@ enum txgbe_atr_flow_type { TXGBE_ATR_FLOW_TYPE_UDPV6 = 0x5, TXGBE_ATR_FLOW_TYPE_TCPV6 = 0x6, TXGBE_ATR_FLOW_TYPE_SCTPV6 = 0x7, + TXGBE_ATR_FLOW_TYPE_TUNNELED_IPV4 = 0x10, + TXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4 = 0x11, + TXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4 = 0x12, + TXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4 = 0x13, + TXGBE_ATR_FLOW_TYPE_TUNNELED_IPV6 = 0x14, + TXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV6 = 0x15, + TXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV6 = 0x16, + TXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV6 = 0x17, }; /* Flow Director ATR input struct. */ diff --git a/drivers/net/txgbe/txgbe_flow.c b/drivers/net/txgbe/txgbe_flow.c index ba1be9f12..b7d0e08a9 100644 --- a/drivers/net/txgbe/txgbe_flow.c +++ b/drivers/net/txgbe/txgbe_flow.c @@ -2064,6 +2064,291 @@ txgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev __rte_unused, return txgbe_parse_fdir_act_attr(attr, actions, rule, error); } +/** + * Parse the rule to see if it is a VxLAN or NVGRE flow director rule. + * And get the flow director filter info BTW. + * VxLAN PATTERN: + * The first not void item must be ETH. + * The second not void item must be IPV4/ IPV6. + * The third not void item must be NVGRE. + * The next not void item must be END. + * NVGRE PATTERN: + * The first not void item must be ETH. + * The second not void item must be IPV4/ IPV6. + * The third not void item must be NVGRE. + * The next not void item must be END. + * ACTION: + * The first not void action should be QUEUE or DROP. + * The second not void optional action should be MARK, + * mark_id is a uint32_t number. + * The next not void action should be END. 
+ * VxLAN pattern example: + * ITEM Spec Mask + * ETH NULL NULL + * IPV4/IPV6 NULL NULL + * UDP NULL NULL + * VxLAN vni{0x00, 0x32, 0x54} {0xFF, 0xFF, 0xFF} + * MAC VLAN tci 0x2016 0xEFFF + * END + * NEGRV pattern example: + * ITEM Spec Mask + * ETH NULL NULL + * IPV4/IPV6 NULL NULL + * NVGRE protocol 0x6558 0xFFFF + * tni{0x00, 0x32, 0x54} {0xFF, 0xFF, 0xFF} + * MAC VLAN tci 0x2016 0xEFFF + * END + * other members in mask and spec should set to 0x00. + * item->last should be NULL. + */ +static int +txgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct txgbe_fdir_rule *rule, + struct rte_flow_error *error) +{ + const struct rte_flow_item *item; + const struct rte_flow_item_eth *eth_mask; + uint32_t j; + + if (!pattern) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_NUM, + NULL, "NULL pattern."); + return -rte_errno; + } + + if (!actions) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_NUM, + NULL, "NULL action."); + return -rte_errno; + } + + if (!attr) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR, + NULL, "NULL attribute."); + return -rte_errno; + } + + /** + * Some fields may not be provided. Set spec to 0 and mask to default + * value. So, we need not do anything for the not provided fields later. + */ + memset(rule, 0, sizeof(struct txgbe_fdir_rule)); + memset(&rule->mask, 0xFF, sizeof(struct txgbe_hw_fdir_mask)); + rule->mask.vlan_tci_mask = 0; + + /** + * The first not void item should be + * MAC or IPv4 or IPv6 or UDP or VxLAN. + */ + item = next_no_void_pattern(pattern, NULL); + if (item->type != RTE_FLOW_ITEM_TYPE_ETH && + item->type != RTE_FLOW_ITEM_TYPE_IPV4 && + item->type != RTE_FLOW_ITEM_TYPE_IPV6 && + item->type != RTE_FLOW_ITEM_TYPE_UDP && + item->type != RTE_FLOW_ITEM_TYPE_VXLAN && + item->type != RTE_FLOW_ITEM_TYPE_NVGRE) { + memset(rule, 0, sizeof(struct txgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + + rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL; + + /* Skip MAC. */ + if (item->type == RTE_FLOW_ITEM_TYPE_ETH) { + /* Only used to describe the protocol stack. */ + if (item->spec || item->mask) { + memset(rule, 0, sizeof(struct txgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + /* Not supported last point for range*/ + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + + /* Check if the next not void item is IPv4 or IPv6. */ + item = next_no_void_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 && + item->type != RTE_FLOW_ITEM_TYPE_IPV6) { + memset(rule, 0, sizeof(struct txgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + } + + /* Skip IP. */ + if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 || + item->type == RTE_FLOW_ITEM_TYPE_IPV6) { + /* Only used to describe the protocol stack. 
*/ + if (item->spec || item->mask) { + memset(rule, 0, sizeof(struct txgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + /*Not supported last point for range*/ + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + + /* Check if the next not void item is UDP or NVGRE. */ + item = next_no_void_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_UDP && + item->type != RTE_FLOW_ITEM_TYPE_NVGRE) { + memset(rule, 0, sizeof(struct txgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + } + + /* Skip UDP. */ + if (item->type == RTE_FLOW_ITEM_TYPE_UDP) { + /* Only used to describe the protocol stack. */ + if (item->spec || item->mask) { + memset(rule, 0, sizeof(struct txgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + /*Not supported last point for range*/ + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + + /* Check if the next not void item is VxLAN. */ + item = next_no_void_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) { + memset(rule, 0, sizeof(struct txgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + } + + /* check if the next not void item is MAC */ + item = next_no_void_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_ETH) { + memset(rule, 0, sizeof(struct txgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + + /** + * Only support vlan and dst MAC address, + * others should be masked. + */ + + if (!item->mask) { + memset(rule, 0, sizeof(struct txgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + /*Not supported last point for range*/ + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + rule->b_mask = TRUE; + eth_mask = item->mask; + + /* Ether type should be masked. */ + if (eth_mask->type) { + memset(rule, 0, sizeof(struct txgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + + /* src MAC address should be masked. */ + for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) { + if (eth_mask->src.addr_bytes[j]) { + memset(rule, 0, + sizeof(struct txgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + } + rule->mask.mac_addr_byte_mask = 0; + for (j = 0; j < ETH_ADDR_LEN; j++) { + /* It's a per byte mask. 
*/ + if (eth_mask->dst.addr_bytes[j] == 0xFF) { + rule->mask.mac_addr_byte_mask |= 0x1 << j; + } else if (eth_mask->dst.addr_bytes[j]) { + memset(rule, 0, sizeof(struct txgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + } + + /* When no vlan, considered as full mask. */ + rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF); + + /** + * Check if the next not void item is vlan or ipv4. + * IPv6 is not supported. + */ + item = next_no_void_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_VLAN && + item->type != RTE_FLOW_ITEM_TYPE_IPV4) { + memset(rule, 0, sizeof(struct txgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + /*Not supported last point for range*/ + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + + /** + * If the tags is 0, it means don't care about the VLAN. + * Do nothing. + */ + + return txgbe_parse_fdir_act_attr(attr, actions, rule, error); +} + static int txgbe_parse_fdir_filter(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, @@ -2081,6 +2366,11 @@ txgbe_parse_fdir_filter(struct rte_eth_dev *dev, if (!ret) goto step_next; + ret = txgbe_parse_fdir_filter_tunnel(attr, pattern, + actions, rule, error); + if (ret) + return ret; + step_next: if (hw->mac.type == txgbe_mac_raptor && From patchwork Tue Nov 3 10:07:59 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiawen Wu X-Patchwork-Id: 83538 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id BE1F2A0521; Tue, 3 Nov 2020 11:13:35 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 4B877C9F2; Tue, 3 Nov 2020 11:07:31 +0100 (CET) Received: from smtpbgeu1.qq.com (smtpbgeu1.qq.com [52.59.177.22]) by dpdk.org (Postfix) with ESMTP id BCE19C96A for ; Tue, 3 Nov 2020 11:07:19 +0100 (CET) X-QQ-mid: bizesmtp26t1604398034te7dvqar Received: from localhost.localdomain.com (unknown [183.129.236.74]) by esmtp10.qq.com (ESMTP) with id ; Tue, 03 Nov 2020 18:07:14 +0800 (CST) X-QQ-SSF: 01400000002000C0C000B00A0000000 X-QQ-FEAT: QPwFcbFodZQr43jYv6G09K7MVRI2gxi2VGmHxN1n3PLXxwl9Q1zdUUbX51zbF oo+P6JQSkigbhLXXduIOw7qjlHqcimVWtyU7IMFZhMp8LmeqN3ejU9y2r6MdO/oLkO1ySM0 KnvTD4OyjtBBXyverVjpdMb+6zQzjQxpe8Ii16q/ZSAsxpQo1qD+ftTvVDKH3kRv2otiVu6 Jqus7W+UUc6DewZg1IPaqnolcbw28/BBeFmRxU9v2gg9xE9Ddc/LfHLdAQoLqkKVOCwNtKk 7JnOhpasR68YaWMKYVxg8dkAbHEgeKlAk8HpIkRs+xUGAxRbUxeSFAk+kkDKp1cnqfIXr03 lau7yZ34YE6zk87HsYu4Hqjqk+dig== X-QQ-GoodBg: 2 From: Jiawen Wu To: dev@dpdk.org Cc: Jiawen Wu Date: Tue, 3 Nov 2020 18:07:59 +0800 Message-Id: <20201103100818.311881-19-jiawenwu@trustnetic.com> X-Mailer: git-send-email 2.18.4 In-Reply-To: <20201103100818.311881-1-jiawenwu@trustnetic.com> References: <20201103100818.311881-1-jiawenwu@trustnetic.com> X-QQ-SENDSIZE: 520 Feedback-ID: bizesmtp:trustnetic.com:qybgforeign:qybgforeign7 X-QQ-Bgrelay: 1 Subject: [dpdk-dev] [PATCH 18/37] net/txgbe: add FDIR restore operation X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: 
List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Add restore operation on FDIR filter. Signed-off-by: Jiawen Wu --- drivers/net/txgbe/txgbe_ethdev.c | 1 + drivers/net/txgbe/txgbe_ethdev.h | 2 ++ drivers/net/txgbe/txgbe_fdir.c | 34 ++++++++++++++++++++++++++++++++ 3 files changed, 37 insertions(+) diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c index df2efe80f..a17d7b190 100644 --- a/drivers/net/txgbe/txgbe_ethdev.c +++ b/drivers/net/txgbe/txgbe_ethdev.c @@ -5077,6 +5077,7 @@ txgbe_filter_restore(struct rte_eth_dev *dev) txgbe_ntuple_filter_restore(dev); txgbe_ethertype_filter_restore(dev); txgbe_syn_filter_restore(dev); + txgbe_fdir_filter_restore(dev); txgbe_l2_tn_filter_restore(dev); return 0; diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h index 5e7fd0a86..88c7d8191 100644 --- a/drivers/net/txgbe/txgbe_ethdev.h +++ b/drivers/net/txgbe/txgbe_ethdev.h @@ -469,6 +469,8 @@ int txgbe_pf_host_configure(struct rte_eth_dev *eth_dev); uint32_t txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val); +void txgbe_fdir_filter_restore(struct rte_eth_dev *dev); + void txgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev); void txgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev); void txgbe_clear_syn_filter(struct rte_eth_dev *dev); diff --git a/drivers/net/txgbe/txgbe_fdir.c b/drivers/net/txgbe/txgbe_fdir.c index d38e21e9e..2faf7fd84 100644 --- a/drivers/net/txgbe/txgbe_fdir.c +++ b/drivers/net/txgbe/txgbe_fdir.c @@ -875,3 +875,37 @@ txgbe_fdir_filter_program(struct rte_eth_dev *dev, return err; } +/* restore flow director filter */ +void +txgbe_fdir_filter_restore(struct rte_eth_dev *dev) +{ + struct txgbe_hw *hw = TXGBE_DEV_HW(dev); + struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(dev); + struct txgbe_fdir_filter *node; + bool is_perfect = FALSE; + enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode; + + if (fdir_mode >= RTE_FDIR_MODE_PERFECT && + fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL) + is_perfect = TRUE; + + if (is_perfect) { + TAILQ_FOREACH(node, &fdir_info->fdir_list, entries) { + (void)fdir_write_perfect_filter(hw, + &node->input, + node->queue, + node->fdirflags, + node->fdirhash, + fdir_mode); + } + } else { + TAILQ_FOREACH(node, &fdir_info->fdir_list, entries) { + (void)fdir_add_signature_filter(hw, + &node->input, + node->queue, + node->fdirflags, + node->fdirhash); + } + } +} + From patchwork Tue Nov 3 10:08:00 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiawen Wu X-Patchwork-Id: 83541 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 905EAA0521; Tue, 3 Nov 2020 11:14:40 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id EE01AC996; Tue, 3 Nov 2020 11:07:35 +0100 (CET) Received: from smtpbgbr2.qq.com (smtpbgbr2.qq.com [54.207.22.56]) by dpdk.org (Postfix) with ESMTP id BB970C90E for ; Tue, 3 Nov 2020 11:07:20 +0100 (CET) X-QQ-mid: bizesmtp26t1604398036tj5q0bi7 Received: from localhost.localdomain.com (unknown [183.129.236.74]) by esmtp10.qq.com (ESMTP) with id ; Tue, 03 Nov 2020 18:07:15 +0800 (CST) X-QQ-SSF: 01400000002000C0C000B00A0000000 X-QQ-FEAT: VGEHthcaPSQMS3x5sCZq/XRl9ttSjolI+WZ0Gn+lRdlyoY3Hxtw1nMEhFZXM9 
BTXOt1z0UhZZaqgHCSaO4IHuz2h7CKl1xKADPhfOcjlTqrUwUO7lI7p1VMnaH0N69+CQioK i9YFd15odSffD7d0++cTrnRuzoc2/pUpenYFHqw4x2jKbcObm12DU0GlXfNT20pFvjPVA1Z KrQ+6sUFRYVRF+Wlb2bThV6F4bE4NB/CFZpEZFGDNuMp0b4x/nccvJi2/POh9okMwz8fonQ HD/DSXBd1mmsmm/fzwgpwq0BBgPqG414bZf+OcAVcgIUkvCiIwzj8PBXBzhjUrTObJ/d2lY 0stSkDqy+bC0ZWDU+2tzAlI/eu1rg== X-QQ-GoodBg: 2 From: Jiawen Wu To: dev@dpdk.org Cc: Jiawen Wu Date: Tue, 3 Nov 2020 18:08:00 +0800 Message-Id: <20201103100818.311881-20-jiawenwu@trustnetic.com> X-Mailer: git-send-email 2.18.4 In-Reply-To: <20201103100818.311881-1-jiawenwu@trustnetic.com> References: <20201103100818.311881-1-jiawenwu@trustnetic.com> X-QQ-SENDSIZE: 520 Feedback-ID: bizesmtp:trustnetic.com:qybgforeign:qybgforeign7 X-QQ-Bgrelay: 1 Subject: [dpdk-dev] [PATCH 19/37] net/txgbe: add RSS filter parse rule X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Add support to parse flow for RSS filter. Signed-off-by: Jiawen Wu --- drivers/net/txgbe/txgbe_ethdev.h | 8 +++ drivers/net/txgbe/txgbe_flow.c | 114 +++++++++++++++++++++++++++++++ drivers/net/txgbe/txgbe_rxtx.c | 20 ++++++ 3 files changed, 142 insertions(+) diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h index 88c7d8191..924c57faf 100644 --- a/drivers/net/txgbe/txgbe_ethdev.h +++ b/drivers/net/txgbe/txgbe_ethdev.h @@ -121,6 +121,12 @@ struct txgbe_hw_fdir_info { bool mask_added; /* If already got mask from consistent filter */ }; +struct txgbe_rte_flow_rss_conf { + struct rte_flow_action_rss conf; /**< RSS parameters. */ + uint8_t key[TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t)]; /* Hash key. */ + uint16_t queue[TXGBE_MAX_RX_QUEUE_NUM]; /**< Queues indices to use. */ +}; + /* structure for interrupt relative data */ struct txgbe_interrupt { uint32_t flags; @@ -480,6 +486,8 @@ int txgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf, uint16_t tx_rate, uint64_t q_msk); int txgbe_set_queue_rate_limit(struct rte_eth_dev *dev, uint16_t queue_idx, uint16_t tx_rate); +int txgbe_rss_conf_init(struct txgbe_rte_flow_rss_conf *out, + const struct rte_flow_action_rss *in); static inline int txgbe_ethertype_filter_lookup(struct txgbe_filter_info *filter_info, uint16_t ethertype) diff --git a/drivers/net/txgbe/txgbe_flow.c b/drivers/net/txgbe/txgbe_flow.c index b7d0e08a9..0d90ef33a 100644 --- a/drivers/net/txgbe/txgbe_flow.c +++ b/drivers/net/txgbe/txgbe_flow.c @@ -2388,3 +2388,117 @@ txgbe_parse_fdir_filter(struct rte_eth_dev *dev, return ret; } +static int +txgbe_parse_rss_filter(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_action actions[], + struct txgbe_rte_flow_rss_conf *rss_conf, + struct rte_flow_error *error) +{ + const struct rte_flow_action *act; + const struct rte_flow_action_rss *rss; + uint16_t n; + + /** + * rss only supports forwarding, + * check if the first not void action is RSS. 
+ */ + act = next_no_void_action(actions, NULL); + if (act->type != RTE_FLOW_ACTION_TYPE_RSS) { + memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } + + rss = (const struct rte_flow_action_rss *)act->conf; + + if (!rss || !rss->queue_num) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "no valid queues"); + return -rte_errno; + } + + for (n = 0; n < rss->queue_num; n++) { + if (rss->queue[n] >= dev->data->nb_rx_queues) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "queue id > max number of queues"); + return -rte_errno; + } + } + + if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act, + "non-default RSS hash functions are not supported"); + if (rss->level) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act, + "a nonzero RSS encapsulation level is not supported"); + if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key)) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act, + "RSS hash key must be exactly 40 bytes"); + if (rss->queue_num > RTE_DIM(rss_conf->queue)) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act, + "too many queues for RSS context"); + if (txgbe_rss_conf_init(rss_conf, rss)) + return rte_flow_error_set + (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act, + "RSS context initialization failure"); + + /* check if the next not void item is END */ + act = next_no_void_action(actions, act); + if (act->type != RTE_FLOW_ACTION_TYPE_END) { + memset(rss_conf, 0, sizeof(struct rte_eth_rss_conf)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } + + /* parse attr */ + /* must be input direction */ + if (!attr->ingress) { + memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, + attr, "Only support ingress."); + return -rte_errno; + } + + /* not supported */ + if (attr->egress) { + memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, + attr, "Not support egress."); + return -rte_errno; + } + + /* not supported */ + if (attr->transfer) { + memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + attr, "No support for transfer."); + return -rte_errno; + } + + if (attr->priority > 0xFFFF) { + memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, + attr, "Error priority."); + return -rte_errno; + } + + return 0; +} + diff --git a/drivers/net/txgbe/txgbe_rxtx.c b/drivers/net/txgbe/txgbe_rxtx.c index 00214b48f..ab812dbff 100644 --- a/drivers/net/txgbe/txgbe_rxtx.c +++ b/drivers/net/txgbe/txgbe_rxtx.c @@ -4633,3 +4633,23 @@ txgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, qinfo->conf.tx_deferred_start = txq->tx_deferred_start; } +int +txgbe_rss_conf_init(struct txgbe_rte_flow_rss_conf *out, + const struct rte_flow_action_rss *in) +{ + if (in->key_len > RTE_DIM(out->key) || + in->queue_num > RTE_DIM(out->queue)) + return -EINVAL; + out->conf = (struct rte_flow_action_rss){ + .func = in->func, + .level = in->level, + .types = in->types, + .key_len = 
in->key_len, + .queue_num = in->queue_num, + .key = memcpy(out->key, in->key, in->key_len), + .queue = memcpy(out->queue, in->queue, + sizeof(*in->queue) * in->queue_num), + }; + return 0; +} + From patchwork Tue Nov 3 10:08:01 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiawen Wu X-Patchwork-Id: 83540 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 5AE00A0521; Tue, 3 Nov 2020 11:14:21 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 51FFDCA18; Tue, 3 Nov 2020 11:07:34 +0100 (CET) Received: from smtpbgsg2.qq.com (smtpbgsg2.qq.com [54.254.200.128]) by dpdk.org (Postfix) with ESMTP id CF6E4C97A for ; Tue, 3 Nov 2020 11:07:21 +0100 (CET) X-QQ-mid: bizesmtp26t1604398037tvre5sy7 Received: from localhost.localdomain.com (unknown [183.129.236.74]) by esmtp10.qq.com (ESMTP) with id ; Tue, 03 Nov 2020 18:07:16 +0800 (CST) X-QQ-SSF: 01400000002000C0C000B00A0000000 X-QQ-FEAT: l6IKqkG+NblrYrmNv8W9gw+ObIG+cXY/H4ORi2FXj6Ojy1e+JGaCCNoDGCkoc PCNop29YrJCCwIll1RaA0mfHl31pMU/kcFh/023IYDiG7bzYDszHaNSFL/rNTEaf0phRZbu 8xK6iDXW7cegpV5z5yozUEWDVjhkvkt7Gn09qgd0bI0nwy2NaZ3v4WZ2ZN/fR02tx42jh8f 2uw89Cp6Tkf8gkD5434hGJ4CkUz1Iv6Enp5axf/g9VeL+XHQ42+NMk9kYdyglMHpwCBa5q0 pYDJigBYSDHABh0gGz8b3hRlzp/OOiIQ/67tk1GMmgSnNszgGbYw76IXD+7v8gCZY3OSvW1 /mZnUJF23FlP/HnlG+cjEjvE2pCDA== X-QQ-GoodBg: 2 From: Jiawen Wu To: dev@dpdk.org Cc: Jiawen Wu Date: Tue, 3 Nov 2020 18:08:01 +0800 Message-Id: <20201103100818.311881-21-jiawenwu@trustnetic.com> X-Mailer: git-send-email 2.18.4 In-Reply-To: <20201103100818.311881-1-jiawenwu@trustnetic.com> References: <20201103100818.311881-1-jiawenwu@trustnetic.com> X-QQ-SENDSIZE: 520 Feedback-ID: bizesmtp:trustnetic.com:qybgforeign:qybgforeign6 X-QQ-Bgrelay: 1 Subject: [dpdk-dev] [PATCH 20/37] net/txgbe: add RSS filter restore operation X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Add restore operation on RSS filter. 
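An RSS rule accepted through the generic flow API is cached in filter_info->rss_info and written back to the RETA and hash-key registers when the driver restores its filters. For reference, a minimal application-side sketch of the kind of rule that ends up cached and replayed; the queue ids, hash types and function name below are illustrative only, not mandated by this patch:

	#include <rte_flow.h>

	static struct rte_flow *
	create_rss_rule(uint16_t port_id)
	{
		uint16_t queues[4] = { 0, 1, 2, 3 };
		struct rte_flow_error err;
		struct rte_flow_attr attr = { .ingress = 1 };
		struct rte_flow_item pattern[] = {
			{ .type = RTE_FLOW_ITEM_TYPE_END },
		};
		struct rte_flow_action_rss rss = {
			.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
			.types = ETH_RSS_IP | ETH_RSS_TCP,
			.queue_num = 4,
			.queue = queues,
		};
		struct rte_flow_action actions[] = {
			{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
			{ .type = RTE_FLOW_ACTION_TYPE_END },
		};

		/* The driver caches the parsed RSS configuration so it can be
		 * reprogrammed after a port stop/start cycle.
		 */
		return rte_flow_create(port_id, &attr, pattern, actions, &err);
	}
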
Signed-off-by: Jiawen Wu --- drivers/net/txgbe/txgbe_ethdev.c | 12 +++++ drivers/net/txgbe/txgbe_ethdev.h | 7 +++ drivers/net/txgbe/txgbe_rxtx.c | 76 ++++++++++++++++++++++++++++++++ 3 files changed, 95 insertions(+) diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c index a17d7b190..5ed96479e 100644 --- a/drivers/net/txgbe/txgbe_ethdev.c +++ b/drivers/net/txgbe/txgbe_ethdev.c @@ -5071,6 +5071,17 @@ txgbe_l2_tn_filter_restore(struct rte_eth_dev *dev) } } +/* restore rss filter */ +static inline void +txgbe_rss_filter_restore(struct rte_eth_dev *dev) +{ + struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev); + + if (filter_info->rss_info.conf.queue_num) + txgbe_config_rss_filter(dev, + &filter_info->rss_info, TRUE); +} + static int txgbe_filter_restore(struct rte_eth_dev *dev) { @@ -5079,6 +5090,7 @@ txgbe_filter_restore(struct rte_eth_dev *dev) txgbe_syn_filter_restore(dev); txgbe_fdir_filter_restore(dev); txgbe_l2_tn_filter_restore(dev); + txgbe_rss_filter_restore(dev); return 0; } diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h index 924c57faf..df7a14506 100644 --- a/drivers/net/txgbe/txgbe_ethdev.h +++ b/drivers/net/txgbe/txgbe_ethdev.h @@ -237,6 +237,8 @@ struct txgbe_filter_info { struct txgbe_5tuple_filter_list fivetuple_list; /* store the SYN filter info */ uint32_t syn_info; + /* store the rss filter info */ + struct txgbe_rte_flow_rss_conf rss_info; }; struct txgbe_l2_tn_key { @@ -488,6 +490,11 @@ int txgbe_set_queue_rate_limit(struct rte_eth_dev *dev, uint16_t queue_idx, uint16_t tx_rate); int txgbe_rss_conf_init(struct txgbe_rte_flow_rss_conf *out, const struct rte_flow_action_rss *in); +int txgbe_action_rss_same(const struct rte_flow_action_rss *comp, + const struct rte_flow_action_rss *with); +int txgbe_config_rss_filter(struct rte_eth_dev *dev, + struct txgbe_rte_flow_rss_conf *conf, bool add); + static inline int txgbe_ethertype_filter_lookup(struct txgbe_filter_info *filter_info, uint16_t ethertype) diff --git a/drivers/net/txgbe/txgbe_rxtx.c b/drivers/net/txgbe/txgbe_rxtx.c index ab812dbff..babda3a79 100644 --- a/drivers/net/txgbe/txgbe_rxtx.c +++ b/drivers/net/txgbe/txgbe_rxtx.c @@ -4653,3 +4653,79 @@ txgbe_rss_conf_init(struct txgbe_rte_flow_rss_conf *out, return 0; } +int +txgbe_action_rss_same(const struct rte_flow_action_rss *comp, + const struct rte_flow_action_rss *with) +{ + return (comp->func == with->func && + comp->level == with->level && + comp->types == with->types && + comp->key_len == with->key_len && + comp->queue_num == with->queue_num && + !memcmp(comp->key, with->key, with->key_len) && + !memcmp(comp->queue, with->queue, + sizeof(*with->queue) * with->queue_num)); +} + +int +txgbe_config_rss_filter(struct rte_eth_dev *dev, + struct txgbe_rte_flow_rss_conf *conf, bool add) +{ + struct txgbe_hw *hw; + uint32_t reta; + uint16_t i; + uint16_t j; + struct rte_eth_rss_conf rss_conf = { + .rss_key = conf->conf.key_len ? 
+ (void *)(uintptr_t)conf->conf.key : NULL, + .rss_key_len = conf->conf.key_len, + .rss_hf = conf->conf.types, + }; + struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev); + + PMD_INIT_FUNC_TRACE(); + hw = TXGBE_DEV_HW(dev); + + if (!add) { + if (txgbe_action_rss_same(&filter_info->rss_info.conf, + &conf->conf)) { + txgbe_rss_disable(dev); + memset(&filter_info->rss_info, 0, + sizeof(struct txgbe_rte_flow_rss_conf)); + return 0; + } + return -EINVAL; + } + + if (filter_info->rss_info.conf.queue_num) + return -EINVAL; + /* Fill in redirection table + * The byte-swap is needed because NIC registers are in + * little-endian order. + */ + reta = 0; + for (i = 0, j = 0; i < ETH_RSS_RETA_SIZE_128; i++, j++) { + if (j == conf->conf.queue_num) + j = 0; + reta = (reta >> 8) | LS32(conf->conf.queue[j], 24, 0xFF); + if ((i & 3) == 3) + wr32a(hw, TXGBE_REG_RSSTBL, i >> 2, reta); + } + + /* Configure the RSS key and the RSS protocols used to compute + * the RSS hash of input packets. + */ + if ((rss_conf.rss_hf & TXGBE_RSS_OFFLOAD_ALL) == 0) { + txgbe_rss_disable(dev); + return 0; + } + if (rss_conf.rss_key == NULL) + rss_conf.rss_key = rss_intel_key; /* Default hash key */ + txgbe_dev_rss_hash_update(dev, &rss_conf); + + if (txgbe_rss_conf_init(&filter_info->rss_info, &conf->conf)) + return -EINVAL; + + return 0; +} + From patchwork Tue Nov 3 10:08:02 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiawen Wu X-Patchwork-Id: 83542 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id BD6D2A0521; Tue, 3 Nov 2020 11:15:00 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 43C6ECA2C; Tue, 3 Nov 2020 11:07:37 +0100 (CET) Received: from smtpbgsg2.qq.com (smtpbgsg2.qq.com [54.254.200.128]) by dpdk.org (Postfix) with ESMTP id C5D79C98C for ; Tue, 3 Nov 2020 11:07:22 +0100 (CET) X-QQ-mid: bizesmtp26t1604398038thqvh356 Received: from localhost.localdomain.com (unknown [183.129.236.74]) by esmtp10.qq.com (ESMTP) with id ; Tue, 03 Nov 2020 18:07:18 +0800 (CST) X-QQ-SSF: 01400000002000C0C000B00A0000000 X-QQ-FEAT: F98/UaRJD4Q50H3QvCygfNWAcZdVEwOikCMEmM8ZgHMiQpjy7VRNrOex/NjuZ DcdbxtHWmOmAqjorMr1ABGUBcZGatIRCzkAlNIFemBUPw8JOhFByLkh0m9GBt5+46vL2H/5 FtLPw/dFqqOpbcUw+01x2lhvarwGbZSTQGtqGD3ZwI54yKWDitP2zeMt7mnGoYMEWdtaphB XrpDbGgiRzwGmA7akJO8bQ/3/nbukKQHA5CeUl7BXuRm13D2AIc6S5n2f20Ap07i3CGGfmc Xk+LyPfUH0ZzlULOU7Kp/ZOruLejXxf4RRWNcGG8Q5F1ST1exCI19aHXoG402wUplbWmXa7 qmJiJbxpVbupJqI4ffe3lgNBEQ2rA== X-QQ-GoodBg: 2 From: Jiawen Wu To: dev@dpdk.org Cc: Jiawen Wu Date: Tue, 3 Nov 2020 18:08:02 +0800 Message-Id: <20201103100818.311881-22-jiawenwu@trustnetic.com> X-Mailer: git-send-email 2.18.4 In-Reply-To: <20201103100818.311881-1-jiawenwu@trustnetic.com> References: <20201103100818.311881-1-jiawenwu@trustnetic.com> X-QQ-SENDSIZE: 520 Feedback-ID: bizesmtp:trustnetic.com:qybgforeign:qybgforeign5 X-QQ-Bgrelay: 1 Subject: [dpdk-dev] [PATCH 21/37] net/txgbe: add filter list init and uninit X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Add filter list init and uninit. 
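Each flow created through the API is tracked twice: the parsed filter is kept on a per-type list, and the rte_flow handle itself is wrapped in a txgbe_flow_mem element on txgbe_flow_list, so a later flush can tear everything down in one pass. The lists are plain sys/queue.h tail queues; a minimal sketch of the pattern they follow (element type and names below are illustrative, not the driver's):

	#include <sys/queue.h>
	#include <stdlib.h>

	struct example_ele {
		TAILQ_ENTRY(example_ele) entries;
		int data;
	};
	TAILQ_HEAD(example_list, example_ele);

	static struct example_list elist = TAILQ_HEAD_INITIALIZER(elist);

	static void
	example_flush(void)
	{
		struct example_ele *e;

		/* Pop-and-free until the list is empty, the same shape as
		 * txgbe_filterlist_flush().
		 */
		while ((e = TAILQ_FIRST(&elist)) != NULL) {
			TAILQ_REMOVE(&elist, e, entries);
			free(e);
		}
	}
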
Signed-off-by: Jiawen Wu --- drivers/net/txgbe/txgbe_ethdev.c | 6 ++ drivers/net/txgbe/txgbe_ethdev.h | 8 ++ drivers/net/txgbe/txgbe_flow.c | 127 +++++++++++++++++++++++++++++++ 3 files changed, 141 insertions(+) diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c index 5ed96479e..45a785fec 100644 --- a/drivers/net/txgbe/txgbe_ethdev.c +++ b/drivers/net/txgbe/txgbe_ethdev.c @@ -698,6 +698,9 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) /* initialize l2 tunnel filter list & hash */ txgbe_l2_tn_filter_init(eth_dev); + /* initialize flow filter lists */ + txgbe_filterlist_init(); + /* initialize bandwidth configuration info */ memset(bw_conf, 0, sizeof(struct txgbe_bw_conf)); @@ -1941,6 +1944,9 @@ txgbe_dev_close(struct rte_eth_dev *dev) /* Remove all ntuple filters of the device */ txgbe_ntuple_filter_uninit(dev); + /* clear all the filters list */ + txgbe_filterlist_flush(); + return ret; } diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h index df7a14506..e12324404 100644 --- a/drivers/net/txgbe/txgbe_ethdev.h +++ b/drivers/net/txgbe/txgbe_ethdev.h @@ -263,6 +263,11 @@ struct txgbe_l2_tn_info { uint16_t e_tag_ether_type; /* ether type for e-tag */ }; +struct rte_flow { + enum rte_filter_type filter_type; + void *rule; +}; + /* The configuration of bandwidth */ struct txgbe_bw_conf { uint8_t tc_num; /* Number of TCs. */ @@ -448,6 +453,9 @@ txgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev, int txgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev, struct txgbe_l2_tunnel_conf *l2_tunnel); +void txgbe_filterlist_init(void); +void txgbe_filterlist_flush(void); + void txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction, uint8_t queue, uint8_t msix_vector); diff --git a/drivers/net/txgbe/txgbe_flow.c b/drivers/net/txgbe/txgbe_flow.c index 0d90ef33a..77e025863 100644 --- a/drivers/net/txgbe/txgbe_flow.c +++ b/drivers/net/txgbe/txgbe_flow.c @@ -16,6 +16,7 @@ #include #include +#include #include #include #include @@ -29,6 +30,58 @@ #define TXGBE_MAX_N_TUPLE_PRIO 7 #define TXGBE_MAX_FLX_SOURCE_OFF 62 +/* ntuple filter list structure */ +struct txgbe_ntuple_filter_ele { + TAILQ_ENTRY(txgbe_ntuple_filter_ele) entries; + struct rte_eth_ntuple_filter filter_info; +}; +/* ethertype filter list structure */ +struct txgbe_ethertype_filter_ele { + TAILQ_ENTRY(txgbe_ethertype_filter_ele) entries; + struct rte_eth_ethertype_filter filter_info; +}; +/* syn filter list structure */ +struct txgbe_eth_syn_filter_ele { + TAILQ_ENTRY(txgbe_eth_syn_filter_ele) entries; + struct rte_eth_syn_filter filter_info; +}; +/* fdir filter list structure */ +struct txgbe_fdir_rule_ele { + TAILQ_ENTRY(txgbe_fdir_rule_ele) entries; + struct txgbe_fdir_rule filter_info; +}; +/* l2_tunnel filter list structure */ +struct txgbe_eth_l2_tunnel_conf_ele { + TAILQ_ENTRY(txgbe_eth_l2_tunnel_conf_ele) entries; + struct txgbe_l2_tunnel_conf filter_info; +}; +/* rss filter list structure */ +struct txgbe_rss_conf_ele { + TAILQ_ENTRY(txgbe_rss_conf_ele) entries; + struct txgbe_rte_flow_rss_conf filter_info; +}; +/* txgbe_flow memory list structure */ +struct txgbe_flow_mem { + TAILQ_ENTRY(txgbe_flow_mem) entries; + struct rte_flow *flow; +}; + +TAILQ_HEAD(txgbe_ntuple_filter_list, txgbe_ntuple_filter_ele); +TAILQ_HEAD(txgbe_ethertype_filter_list, txgbe_ethertype_filter_ele); +TAILQ_HEAD(txgbe_syn_filter_list, txgbe_eth_syn_filter_ele); +TAILQ_HEAD(txgbe_fdir_rule_filter_list, txgbe_fdir_rule_ele); 
+TAILQ_HEAD(txgbe_l2_tunnel_filter_list, txgbe_eth_l2_tunnel_conf_ele); +TAILQ_HEAD(txgbe_rss_filter_list, txgbe_rss_conf_ele); +TAILQ_HEAD(txgbe_flow_mem_list, txgbe_flow_mem); + +static struct txgbe_ntuple_filter_list filter_ntuple_list; +static struct txgbe_ethertype_filter_list filter_ethertype_list; +static struct txgbe_syn_filter_list filter_syn_list; +static struct txgbe_fdir_rule_filter_list filter_fdir_list; +static struct txgbe_l2_tunnel_filter_list filter_l2_tunnel_list; +static struct txgbe_rss_filter_list filter_rss_list; +static struct txgbe_flow_mem_list txgbe_flow_list; + /** * Endless loop will never happen with below assumption * 1. there is at least one no-void item(END) @@ -2502,3 +2555,77 @@ txgbe_parse_rss_filter(struct rte_eth_dev *dev, return 0; } +void +txgbe_filterlist_init(void) +{ + TAILQ_INIT(&filter_ntuple_list); + TAILQ_INIT(&filter_ethertype_list); + TAILQ_INIT(&filter_syn_list); + TAILQ_INIT(&filter_fdir_list); + TAILQ_INIT(&filter_l2_tunnel_list); + TAILQ_INIT(&filter_rss_list); + TAILQ_INIT(&txgbe_flow_list); +} + +void +txgbe_filterlist_flush(void) +{ + struct txgbe_ntuple_filter_ele *ntuple_filter_ptr; + struct txgbe_ethertype_filter_ele *ethertype_filter_ptr; + struct txgbe_eth_syn_filter_ele *syn_filter_ptr; + struct txgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr; + struct txgbe_fdir_rule_ele *fdir_rule_ptr; + struct txgbe_flow_mem *txgbe_flow_mem_ptr; + struct txgbe_rss_conf_ele *rss_filter_ptr; + + while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) { + TAILQ_REMOVE(&filter_ntuple_list, + ntuple_filter_ptr, + entries); + rte_free(ntuple_filter_ptr); + } + + while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) { + TAILQ_REMOVE(&filter_ethertype_list, + ethertype_filter_ptr, + entries); + rte_free(ethertype_filter_ptr); + } + + while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) { + TAILQ_REMOVE(&filter_syn_list, + syn_filter_ptr, + entries); + rte_free(syn_filter_ptr); + } + + while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) { + TAILQ_REMOVE(&filter_l2_tunnel_list, + l2_tn_filter_ptr, + entries); + rte_free(l2_tn_filter_ptr); + } + + while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) { + TAILQ_REMOVE(&filter_fdir_list, + fdir_rule_ptr, + entries); + rte_free(fdir_rule_ptr); + } + + while ((rss_filter_ptr = TAILQ_FIRST(&filter_rss_list))) { + TAILQ_REMOVE(&filter_rss_list, + rss_filter_ptr, + entries); + rte_free(rss_filter_ptr); + } + + while ((txgbe_flow_mem_ptr = TAILQ_FIRST(&txgbe_flow_list))) { + TAILQ_REMOVE(&txgbe_flow_list, + txgbe_flow_mem_ptr, + entries); + rte_free(txgbe_flow_mem_ptr->flow); + rte_free(txgbe_flow_mem_ptr); + } +} + From patchwork Tue Nov 3 10:08:03 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiawen Wu X-Patchwork-Id: 83543 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id E8F87A0521; Tue, 3 Nov 2020 11:15:20 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 9BF63CA37; Tue, 3 Nov 2020 11:07:38 +0100 (CET) Received: from smtpbgau2.qq.com (smtpbgau2.qq.com [54.206.34.216]) by dpdk.org (Postfix) with ESMTP id 44068C9BC for ; Tue, 3 Nov 2020 11:07:24 +0100 (CET) X-QQ-mid: bizesmtp26t1604398039t4v1bfhq Received: from localhost.localdomain.com (unknown [183.129.236.74]) by 
esmtp10.qq.com (ESMTP) with id ; Tue, 03 Nov 2020 18:07:19 +0800 (CST) X-QQ-SSF: 01400000002000C0C000B00A0000000 X-QQ-FEAT: vCa7dv7JIu7yn0Gtedpmltp9BTkWdCbKrJQkB+W6ULjrHQamKdN2+92RJMHFe fHapfGBWuxNcZjxD8xxk++hzXWd3B0/s/KStzsFsS8YUG3bzdKZnU+0bZ+ikfghdr0c2NSb lYIjk4EpL+/AqgNPxTjgYa85qrxwYDuX68cYMQLbwwG/jT+wJaajPnD7lunDoBCqYpC67AL /gN4TMhDfVq0Ins1wX2na7FVacSQgNmqSSre5Kojr2c0d1U9khb2O/GSWUsWakMg72mTAxu aEt9iN5c9EPB8UMIFDRiyPJ6Ucwo37Cyph5DoyeLf/zVx0C5A2brsMXgcX32ymadxA+/nEK jMyihLITvE3FtwmzZFJrqyRz2fmdw== X-QQ-GoodBg: 2 From: Jiawen Wu To: dev@dpdk.org Cc: Jiawen Wu Date: Tue, 3 Nov 2020 18:08:03 +0800 Message-Id: <20201103100818.311881-23-jiawenwu@trustnetic.com> X-Mailer: git-send-email 2.18.4 In-Reply-To: <20201103100818.311881-1-jiawenwu@trustnetic.com> References: <20201103100818.311881-1-jiawenwu@trustnetic.com> X-QQ-SENDSIZE: 520 Feedback-ID: bizesmtp:trustnetic.com:qybgforeign:qybgforeign6 X-QQ-Bgrelay: 1 Subject: [dpdk-dev] [PATCH 22/37] net/txgbe: add flow API X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Add flow API with generic flow operaions, validate first. Signed-off-by: Jiawen Wu --- doc/guides/nics/features/txgbe.ini | 1 + doc/guides/nics/txgbe.rst | 1 + drivers/net/txgbe/txgbe_ethdev.c | 25 ++++++++++++ drivers/net/txgbe/txgbe_ethdev.h | 2 + drivers/net/txgbe/txgbe_flow.c | 61 ++++++++++++++++++++++++++++++ 5 files changed, 90 insertions(+) diff --git a/doc/guides/nics/features/txgbe.ini b/doc/guides/nics/features/txgbe.ini index 7c457fede..9db2ccde0 100644 --- a/doc/guides/nics/features/txgbe.ini +++ b/doc/guides/nics/features/txgbe.ini @@ -26,6 +26,7 @@ SR-IOV = Y DCB = Y VLAN filter = Y Flow control = Y +Flow API = Y Rate limitation = Y Traffic mirroring = Y CRC offload = P diff --git a/doc/guides/nics/txgbe.rst b/doc/guides/nics/txgbe.rst index cd293698b..5a7299964 100644 --- a/doc/guides/nics/txgbe.rst +++ b/doc/guides/nics/txgbe.rst @@ -29,6 +29,7 @@ Features - IEEE 1588 - FW version - LRO +- Generic flow API Prerequisites ------------- diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c index 45a785fec..cc061e0d6 100644 --- a/drivers/net/txgbe/txgbe_ethdev.c +++ b/drivers/net/txgbe/txgbe_ethdev.c @@ -4234,6 +4234,30 @@ txgbe_add_del_ethertype_filter(struct rte_eth_dev *dev, return 0; } +static int +txgbe_dev_filter_ctrl(__rte_unused struct rte_eth_dev *dev, + enum rte_filter_type filter_type, + enum rte_filter_op filter_op, + void *arg) +{ + int ret = 0; + + switch (filter_type) { + case RTE_ETH_FILTER_GENERIC: + if (filter_op != RTE_ETH_FILTER_GET) + return -EINVAL; + *(const void **)arg = &txgbe_flow_ops; + break; + default: + PMD_DRV_LOG(WARNING, "Filter type (%d) not supported", + filter_type); + ret = -EINVAL; + break; + } + + return ret; +} + static u8 * txgbe_dev_addr_list_itr(__rte_unused struct txgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq) @@ -5218,6 +5242,7 @@ static const struct eth_dev_ops txgbe_eth_dev_ops = { .reta_query = txgbe_dev_rss_reta_query, .rss_hash_update = txgbe_dev_rss_hash_update, .rss_hash_conf_get = txgbe_dev_rss_hash_conf_get, + .filter_ctrl = txgbe_dev_filter_ctrl, .set_mc_addr_list = txgbe_dev_set_mc_addr_list, .rxq_info_get = txgbe_rxq_info_get, .txq_info_get = txgbe_txq_info_get, diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h index e12324404..2b5c0e35d 100644 --- a/drivers/net/txgbe/txgbe_ethdev.h 
+++ b/drivers/net/txgbe/txgbe_ethdev.h @@ -487,6 +487,8 @@ uint32_t txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val); void txgbe_fdir_filter_restore(struct rte_eth_dev *dev); +extern const struct rte_flow_ops txgbe_flow_ops; + void txgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev); void txgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev); void txgbe_clear_syn_filter(struct rte_eth_dev *dev); diff --git a/drivers/net/txgbe/txgbe_flow.c b/drivers/net/txgbe/txgbe_flow.c index 77e025863..884a8545f 100644 --- a/drivers/net/txgbe/txgbe_flow.c +++ b/drivers/net/txgbe/txgbe_flow.c @@ -2629,3 +2629,64 @@ txgbe_filterlist_flush(void) } } +/** + * Check if the flow rule is supported by txgbe. + * It only checkes the format. Don't guarantee the rule can be programmed into + * the HW. Because there can be no enough room for the rule. + */ +static int +txgbe_flow_validate(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct rte_eth_ntuple_filter ntuple_filter; + struct rte_eth_ethertype_filter ethertype_filter; + struct rte_eth_syn_filter syn_filter; + struct txgbe_l2_tunnel_conf l2_tn_filter; + struct txgbe_fdir_rule fdir_rule; + struct txgbe_rte_flow_rss_conf rss_conf; + int ret; + + memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter)); + ret = txgbe_parse_ntuple_filter(dev, attr, pattern, + actions, &ntuple_filter, error); + if (!ret) + return 0; + + memset(ðertype_filter, 0, sizeof(struct rte_eth_ethertype_filter)); + ret = txgbe_parse_ethertype_filter(dev, attr, pattern, + actions, ðertype_filter, error); + if (!ret) + return 0; + + memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter)); + ret = txgbe_parse_syn_filter(dev, attr, pattern, + actions, &syn_filter, error); + if (!ret) + return 0; + + memset(&fdir_rule, 0, sizeof(struct txgbe_fdir_rule)); + ret = txgbe_parse_fdir_filter(dev, attr, pattern, + actions, &fdir_rule, error); + if (!ret) + return 0; + + memset(&l2_tn_filter, 0, sizeof(struct txgbe_l2_tunnel_conf)); + ret = txgbe_parse_l2_tn_filter(dev, attr, pattern, + actions, &l2_tn_filter, error); + if (!ret) + return 0; + + memset(&rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf)); + ret = txgbe_parse_rss_filter(dev, attr, + actions, &rss_conf, error); + + return ret; +} + +const struct rte_flow_ops txgbe_flow_ops = { + .validate = txgbe_flow_validate, +}; + From patchwork Tue Nov 3 10:08:04 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiawen Wu X-Patchwork-Id: 83545 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 2F835A0521; Tue, 3 Nov 2020 11:16:00 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 8082CCA4D; Tue, 3 Nov 2020 11:07:42 +0100 (CET) Received: from smtpproxy21.qq.com (smtpbg702.qq.com [203.205.195.102]) by dpdk.org (Postfix) with ESMTP id ADEC8C9BC for ; Tue, 3 Nov 2020 11:07:25 +0100 (CET) X-QQ-mid: bizesmtp26t1604398040tw2w1nm7 Received: from localhost.localdomain.com (unknown [183.129.236.74]) by esmtp10.qq.com (ESMTP) with id ; Tue, 03 Nov 2020 18:07:20 +0800 (CST) X-QQ-SSF: 01400000002000C0C000B00A0000000 X-QQ-FEAT: mr9Nfp8Iec2jr//d21tWsWAGUlSt4e7Es7qbSsmZ4S8y93EYSWjwgIzVLMew2 
ZDfh/HhaDiSRoWbc7L+47EmFRpIBZ6TdRZ43kZMhnM6FL2QirDmwKMHuv7/8QZNKegStqc3 ZvLfcrWaG9BV5HAETQ3EJm6IwLZt+aTXEad2CWe9nKZ73ezHi5Kq5JA393+RIP32ghQcU1B 27HGy0H91wo5CV/W45nv8+NB140SoqNkjKQd2EveY5K0rAOiZFEBjky7/EGng3t3XeFQFww LPWs2+fyE29dwfZW4RGU/huQNSHrbl0MqwbIHiIbwA56Eh3xZ6MaIbqW7C2F4yMZUExz+Mn GISDZKtRaNxCdRNUHf7ffksfLVOzw== X-QQ-GoodBg: 2 From: Jiawen Wu To: dev@dpdk.org Cc: Jiawen Wu Date: Tue, 3 Nov 2020 18:08:04 +0800 Message-Id: <20201103100818.311881-24-jiawenwu@trustnetic.com> X-Mailer: git-send-email 2.18.4 In-Reply-To: <20201103100818.311881-1-jiawenwu@trustnetic.com> References: <20201103100818.311881-1-jiawenwu@trustnetic.com> X-QQ-SENDSIZE: 520 Feedback-ID: bizesmtp:trustnetic.com:qybgforeign:qybgforeign5 X-QQ-Bgrelay: 1 Subject: [dpdk-dev] [PATCH 23/37] net/txgbe: add flow API create function X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Add support to create operation for flow API. Signed-off-by: Jiawen Wu --- drivers/net/txgbe/txgbe_ethdev.h | 2 + drivers/net/txgbe/txgbe_fdir.c | 27 ++++ drivers/net/txgbe/txgbe_flow.c | 257 +++++++++++++++++++++++++++++++ 3 files changed, 286 insertions(+) diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h index 2b5c0e35d..f439a201b 100644 --- a/drivers/net/txgbe/txgbe_ethdev.h +++ b/drivers/net/txgbe/txgbe_ethdev.h @@ -464,6 +464,8 @@ void txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction, */ int txgbe_fdir_configure(struct rte_eth_dev *dev); int txgbe_fdir_set_input_mask(struct rte_eth_dev *dev); +int txgbe_fdir_set_flexbytes_offset(struct rte_eth_dev *dev, + uint16_t offset); int txgbe_fdir_filter_program(struct rte_eth_dev *dev, struct txgbe_fdir_rule *rule, bool del, bool update); diff --git a/drivers/net/txgbe/txgbe_fdir.c b/drivers/net/txgbe/txgbe_fdir.c index 2faf7fd84..2342cf681 100644 --- a/drivers/net/txgbe/txgbe_fdir.c +++ b/drivers/net/txgbe/txgbe_fdir.c @@ -270,6 +270,33 @@ txgbe_fdir_store_input_mask(struct rte_eth_dev *dev) return 0; } +int +txgbe_fdir_set_flexbytes_offset(struct rte_eth_dev *dev, + uint16_t offset) +{ + struct txgbe_hw *hw = TXGBE_DEV_HW(dev); + int i; + + for (i = 0; i < 64; i++) { + uint32_t flexreg, flex; + flexreg = rd32(hw, TXGBE_FDIRFLEXCFG(i / 4)); + flex = TXGBE_FDIRFLEXCFG_BASE_MAC; + flex |= TXGBE_FDIRFLEXCFG_OFST(offset / 2); + flexreg &= ~(TXGBE_FDIRFLEXCFG_ALL(~0UL, i % 4)); + flexreg |= TXGBE_FDIRFLEXCFG_ALL(flex, i % 4); + wr32(hw, TXGBE_FDIRFLEXCFG(i / 4), flexreg); + } + + txgbe_flush(hw); + for (i = 0; i < TXGBE_FDIR_INIT_DONE_POLL; i++) { + if (rd32(hw, TXGBE_FDIRCTL) & + TXGBE_FDIRCTL_INITDONE) + break; + msec_delay(1); + } + return 0; +} + /* * txgbe_check_fdir_flex_conf -check if the flex payload and mask configuration * arguments are valid diff --git a/drivers/net/txgbe/txgbe_flow.c b/drivers/net/txgbe/txgbe_flow.c index 884a8545f..4141352bf 100644 --- a/drivers/net/txgbe/txgbe_flow.c +++ b/drivers/net/txgbe/txgbe_flow.c @@ -2629,6 +2629,262 @@ txgbe_filterlist_flush(void) } } +/** + * Create or destroy a flow rule. + * Theorically one rule can match more than one filters. + * We will let it use the filter which it hitt first. + * So, the sequence matters. 
+ */ +static struct rte_flow * +txgbe_flow_create(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + int ret; + struct rte_eth_ntuple_filter ntuple_filter; + struct rte_eth_ethertype_filter ethertype_filter; + struct rte_eth_syn_filter syn_filter; + struct txgbe_fdir_rule fdir_rule; + struct txgbe_l2_tunnel_conf l2_tn_filter; + struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(dev); + struct txgbe_rte_flow_rss_conf rss_conf; + struct rte_flow *flow = NULL; + struct txgbe_ntuple_filter_ele *ntuple_filter_ptr; + struct txgbe_ethertype_filter_ele *ethertype_filter_ptr; + struct txgbe_eth_syn_filter_ele *syn_filter_ptr; + struct txgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr; + struct txgbe_fdir_rule_ele *fdir_rule_ptr; + struct txgbe_rss_conf_ele *rss_filter_ptr; + struct txgbe_flow_mem *txgbe_flow_mem_ptr; + uint8_t first_mask = FALSE; + + flow = rte_zmalloc("txgbe_rte_flow", sizeof(struct rte_flow), 0); + if (!flow) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + return (struct rte_flow *)flow; + } + txgbe_flow_mem_ptr = rte_zmalloc("txgbe_flow_mem", + sizeof(struct txgbe_flow_mem), 0); + if (!txgbe_flow_mem_ptr) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + rte_free(flow); + return NULL; + } + txgbe_flow_mem_ptr->flow = flow; + TAILQ_INSERT_TAIL(&txgbe_flow_list, + txgbe_flow_mem_ptr, entries); + + memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter)); + ret = txgbe_parse_ntuple_filter(dev, attr, pattern, + actions, &ntuple_filter, error); + + if (!ret) { + ret = txgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE); + if (!ret) { + ntuple_filter_ptr = rte_zmalloc("txgbe_ntuple_filter", + sizeof(struct txgbe_ntuple_filter_ele), 0); + if (!ntuple_filter_ptr) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + goto out; + } + rte_memcpy(&ntuple_filter_ptr->filter_info, + &ntuple_filter, + sizeof(struct rte_eth_ntuple_filter)); + TAILQ_INSERT_TAIL(&filter_ntuple_list, + ntuple_filter_ptr, entries); + flow->rule = ntuple_filter_ptr; + flow->filter_type = RTE_ETH_FILTER_NTUPLE; + return flow; + } + goto out; + } + + memset(ðertype_filter, 0, sizeof(struct rte_eth_ethertype_filter)); + ret = txgbe_parse_ethertype_filter(dev, attr, pattern, + actions, ðertype_filter, error); + if (!ret) { + ret = txgbe_add_del_ethertype_filter(dev, + ðertype_filter, TRUE); + if (!ret) { + ethertype_filter_ptr = + rte_zmalloc("txgbe_ethertype_filter", + sizeof(struct txgbe_ethertype_filter_ele), 0); + if (!ethertype_filter_ptr) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + goto out; + } + rte_memcpy(ðertype_filter_ptr->filter_info, + ðertype_filter, + sizeof(struct rte_eth_ethertype_filter)); + TAILQ_INSERT_TAIL(&filter_ethertype_list, + ethertype_filter_ptr, entries); + flow->rule = ethertype_filter_ptr; + flow->filter_type = RTE_ETH_FILTER_ETHERTYPE; + return flow; + } + goto out; + } + + memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter)); + ret = txgbe_parse_syn_filter(dev, attr, pattern, + actions, &syn_filter, error); + if (!ret) { + ret = txgbe_syn_filter_set(dev, &syn_filter, TRUE); + if (!ret) { + syn_filter_ptr = rte_zmalloc("txgbe_syn_filter", + sizeof(struct txgbe_eth_syn_filter_ele), 0); + if (!syn_filter_ptr) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + goto out; + } + rte_memcpy(&syn_filter_ptr->filter_info, + &syn_filter, + sizeof(struct rte_eth_syn_filter)); + TAILQ_INSERT_TAIL(&filter_syn_list, + syn_filter_ptr, + 
entries); + flow->rule = syn_filter_ptr; + flow->filter_type = RTE_ETH_FILTER_SYN; + return flow; + } + goto out; + } + + memset(&fdir_rule, 0, sizeof(struct txgbe_fdir_rule)); + ret = txgbe_parse_fdir_filter(dev, attr, pattern, + actions, &fdir_rule, error); + if (!ret) { + /* A mask cannot be deleted. */ + if (fdir_rule.b_mask) { + if (!fdir_info->mask_added) { + /* It's the first time the mask is set. */ + rte_memcpy(&fdir_info->mask, + &fdir_rule.mask, + sizeof(struct txgbe_hw_fdir_mask)); + fdir_info->flex_bytes_offset = + fdir_rule.flex_bytes_offset; + + if (fdir_rule.mask.flex_bytes_mask) + txgbe_fdir_set_flexbytes_offset(dev, + fdir_rule.flex_bytes_offset); + + ret = txgbe_fdir_set_input_mask(dev); + if (ret) + goto out; + + fdir_info->mask_added = TRUE; + first_mask = TRUE; + } else { + /** + * Only support one global mask, + * all the masks should be the same. + */ + ret = memcmp(&fdir_info->mask, + &fdir_rule.mask, + sizeof(struct txgbe_hw_fdir_mask)); + if (ret) + goto out; + + if (fdir_info->flex_bytes_offset != + fdir_rule.flex_bytes_offset) + goto out; + } + } + + if (fdir_rule.b_spec) { + ret = txgbe_fdir_filter_program(dev, &fdir_rule, + FALSE, FALSE); + if (!ret) { + fdir_rule_ptr = rte_zmalloc("txgbe_fdir_filter", + sizeof(struct txgbe_fdir_rule_ele), 0); + if (!fdir_rule_ptr) { + PMD_DRV_LOG(ERR, + "failed to allocate memory"); + goto out; + } + rte_memcpy(&fdir_rule_ptr->filter_info, + &fdir_rule, + sizeof(struct txgbe_fdir_rule)); + TAILQ_INSERT_TAIL(&filter_fdir_list, + fdir_rule_ptr, entries); + flow->rule = fdir_rule_ptr; + flow->filter_type = RTE_ETH_FILTER_FDIR; + + return flow; + } + + if (ret) { + /** + * clean the mask_added flag if fail to + * program + **/ + if (first_mask) + fdir_info->mask_added = FALSE; + goto out; + } + } + + goto out; + } + + memset(&l2_tn_filter, 0, sizeof(struct txgbe_l2_tunnel_conf)); + ret = txgbe_parse_l2_tn_filter(dev, attr, pattern, + actions, &l2_tn_filter, error); + if (!ret) { + ret = txgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE); + if (!ret) { + l2_tn_filter_ptr = rte_zmalloc("txgbe_l2_tn_filter", + sizeof(struct txgbe_eth_l2_tunnel_conf_ele), 0); + if (!l2_tn_filter_ptr) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + goto out; + } + rte_memcpy(&l2_tn_filter_ptr->filter_info, + &l2_tn_filter, + sizeof(struct txgbe_l2_tunnel_conf)); + TAILQ_INSERT_TAIL(&filter_l2_tunnel_list, + l2_tn_filter_ptr, entries); + flow->rule = l2_tn_filter_ptr; + flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL; + return flow; + } + } + + memset(&rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf)); + ret = txgbe_parse_rss_filter(dev, attr, + actions, &rss_conf, error); + if (!ret) { + ret = txgbe_config_rss_filter(dev, &rss_conf, TRUE); + if (!ret) { + rss_filter_ptr = rte_zmalloc("txgbe_rss_filter", + sizeof(struct txgbe_rss_conf_ele), 0); + if (!rss_filter_ptr) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + goto out; + } + txgbe_rss_conf_init(&rss_filter_ptr->filter_info, + &rss_conf.conf); + TAILQ_INSERT_TAIL(&filter_rss_list, + rss_filter_ptr, entries); + flow->rule = rss_filter_ptr; + flow->filter_type = RTE_ETH_FILTER_HASH; + return flow; + } + } + +out: + TAILQ_REMOVE(&txgbe_flow_list, + txgbe_flow_mem_ptr, entries); + rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to create flow."); + rte_free(txgbe_flow_mem_ptr); + rte_free(flow); + return NULL; +} + /** * Check if the flow rule is supported by txgbe. * It only checkes the format. 
Don't guarantee the rule can be programmed into @@ -2688,5 +2944,6 @@ txgbe_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_ops txgbe_flow_ops = { .validate = txgbe_flow_validate, + .create = txgbe_flow_create, }; From patchwork Tue Nov 3 10:08:05 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiawen Wu X-Patchwork-Id: 83544 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id BBE62A0521; Tue, 3 Nov 2020 11:15:38 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 13E06CA45; Tue, 3 Nov 2020 11:07:40 +0100 (CET) Received: from smtpproxy21.qq.com (smtpbg704.qq.com [203.205.195.105]) by dpdk.org (Postfix) with ESMTP id D147EC9C0 for ; Tue, 3 Nov 2020 11:07:25 +0100 (CET) X-QQ-mid: bizesmtp26t1604398041tiursvwp Received: from localhost.localdomain.com (unknown [183.129.236.74]) by esmtp10.qq.com (ESMTP) with id ; Tue, 03 Nov 2020 18:07:21 +0800 (CST) X-QQ-SSF: 01400000002000C0C000B00A0000000 X-QQ-FEAT: r3TV06MSMje3KqjgNMqhtTSQsoYfRevkm2b4fYM76VcBLuamLggKkIuoRvwyJ BwNIurtl4oSKgywiVdXXpXdKYeXqJBam6P6NmuqYjSX1l1Vw2Q9nfR3mOxnt04pA30ITXEp i/CykU7WVQFiX7HcB95YYn1Tm+srrS98qkTtK+2Z3/lti9FhUUD+pY5MaTt+ifAvTA3mQzp JqqEefNYEBi3cI6F+Qk2qot+h4VLV6nXEacXy7++wz2vMv9IQ+ojty4vLChp7IiUYykUO98 y99gX901KbLuVuQc3imcK35mWFGj7z0Ewq+VjLfN09QCRyU9DBy9aTHp2IASr7St8x6vSzO 0efx6ar8qg7YsGMLyo= X-QQ-GoodBg: 2 From: Jiawen Wu To: dev@dpdk.org Cc: Jiawen Wu Date: Tue, 3 Nov 2020 18:08:05 +0800 Message-Id: <20201103100818.311881-25-jiawenwu@trustnetic.com> X-Mailer: git-send-email 2.18.4 In-Reply-To: <20201103100818.311881-1-jiawenwu@trustnetic.com> References: <20201103100818.311881-1-jiawenwu@trustnetic.com> X-QQ-SENDSIZE: 520 Feedback-ID: bizesmtp:trustnetic.com:qybgforeign:qybgforeign6 X-QQ-Bgrelay: 1 Subject: [dpdk-dev] [PATCH 24/37] net/txgbe: add flow API destroy function X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Add support to destroy operation for flow API. Signed-off-by: Jiawen Wu --- drivers/net/txgbe/txgbe_flow.c | 128 +++++++++++++++++++++++++++++++++ 1 file changed, 128 insertions(+) diff --git a/drivers/net/txgbe/txgbe_flow.c b/drivers/net/txgbe/txgbe_flow.c index 4141352bf..8d5362175 100644 --- a/drivers/net/txgbe/txgbe_flow.c +++ b/drivers/net/txgbe/txgbe_flow.c @@ -2942,8 +2942,136 @@ txgbe_flow_validate(struct rte_eth_dev *dev, return ret; } +/* Destroy a flow rule on txgbe. 
*/ +static int +txgbe_flow_destroy(struct rte_eth_dev *dev, + struct rte_flow *flow, + struct rte_flow_error *error) +{ + int ret; + struct rte_flow *pmd_flow = flow; + enum rte_filter_type filter_type = pmd_flow->filter_type; + struct rte_eth_ntuple_filter ntuple_filter; + struct rte_eth_ethertype_filter ethertype_filter; + struct rte_eth_syn_filter syn_filter; + struct txgbe_fdir_rule fdir_rule; + struct txgbe_l2_tunnel_conf l2_tn_filter; + struct txgbe_ntuple_filter_ele *ntuple_filter_ptr; + struct txgbe_ethertype_filter_ele *ethertype_filter_ptr; + struct txgbe_eth_syn_filter_ele *syn_filter_ptr; + struct txgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr; + struct txgbe_fdir_rule_ele *fdir_rule_ptr; + struct txgbe_flow_mem *txgbe_flow_mem_ptr; + struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(dev); + struct txgbe_rss_conf_ele *rss_filter_ptr; + + switch (filter_type) { + case RTE_ETH_FILTER_NTUPLE: + ntuple_filter_ptr = (struct txgbe_ntuple_filter_ele *) + pmd_flow->rule; + rte_memcpy(&ntuple_filter, + &ntuple_filter_ptr->filter_info, + sizeof(struct rte_eth_ntuple_filter)); + ret = txgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE); + if (!ret) { + TAILQ_REMOVE(&filter_ntuple_list, + ntuple_filter_ptr, entries); + rte_free(ntuple_filter_ptr); + } + break; + case RTE_ETH_FILTER_ETHERTYPE: + ethertype_filter_ptr = (struct txgbe_ethertype_filter_ele *) + pmd_flow->rule; + rte_memcpy(ðertype_filter, + ðertype_filter_ptr->filter_info, + sizeof(struct rte_eth_ethertype_filter)); + ret = txgbe_add_del_ethertype_filter(dev, + ðertype_filter, FALSE); + if (!ret) { + TAILQ_REMOVE(&filter_ethertype_list, + ethertype_filter_ptr, entries); + rte_free(ethertype_filter_ptr); + } + break; + case RTE_ETH_FILTER_SYN: + syn_filter_ptr = (struct txgbe_eth_syn_filter_ele *) + pmd_flow->rule; + rte_memcpy(&syn_filter, + &syn_filter_ptr->filter_info, + sizeof(struct rte_eth_syn_filter)); + ret = txgbe_syn_filter_set(dev, &syn_filter, FALSE); + if (!ret) { + TAILQ_REMOVE(&filter_syn_list, + syn_filter_ptr, entries); + rte_free(syn_filter_ptr); + } + break; + case RTE_ETH_FILTER_FDIR: + fdir_rule_ptr = (struct txgbe_fdir_rule_ele *)pmd_flow->rule; + rte_memcpy(&fdir_rule, + &fdir_rule_ptr->filter_info, + sizeof(struct txgbe_fdir_rule)); + ret = txgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE); + if (!ret) { + TAILQ_REMOVE(&filter_fdir_list, + fdir_rule_ptr, entries); + rte_free(fdir_rule_ptr); + if (TAILQ_EMPTY(&filter_fdir_list)) + fdir_info->mask_added = false; + } + break; + case RTE_ETH_FILTER_L2_TUNNEL: + l2_tn_filter_ptr = (struct txgbe_eth_l2_tunnel_conf_ele *) + pmd_flow->rule; + rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info, + sizeof(struct txgbe_l2_tunnel_conf)); + ret = txgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter); + if (!ret) { + TAILQ_REMOVE(&filter_l2_tunnel_list, + l2_tn_filter_ptr, entries); + rte_free(l2_tn_filter_ptr); + } + break; + case RTE_ETH_FILTER_HASH: + rss_filter_ptr = (struct txgbe_rss_conf_ele *) + pmd_flow->rule; + ret = txgbe_config_rss_filter(dev, + &rss_filter_ptr->filter_info, FALSE); + if (!ret) { + TAILQ_REMOVE(&filter_rss_list, + rss_filter_ptr, entries); + rte_free(rss_filter_ptr); + } + break; + default: + PMD_DRV_LOG(WARNING, "Filter type (%d) not supported", + filter_type); + ret = -EINVAL; + break; + } + + if (ret) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "Failed to destroy flow"); + return ret; + } + + TAILQ_FOREACH(txgbe_flow_mem_ptr, &txgbe_flow_list, entries) { + if (txgbe_flow_mem_ptr->flow == 
pmd_flow) { + TAILQ_REMOVE(&txgbe_flow_list, + txgbe_flow_mem_ptr, entries); + rte_free(txgbe_flow_mem_ptr); + } + } + rte_free(flow); + + return ret; +} + const struct rte_flow_ops txgbe_flow_ops = { .validate = txgbe_flow_validate, .create = txgbe_flow_create, + .destroy = txgbe_flow_destroy, }; From patchwork Tue Nov 3 10:08:06 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiawen Wu X-Patchwork-Id: 83546 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id A8430A0521; Tue, 3 Nov 2020 11:16:17 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 4C6C7CA54; Tue, 3 Nov 2020 11:07:44 +0100 (CET) Received: from smtpproxy21.qq.com (smtpbg702.qq.com [203.205.195.102]) by dpdk.org (Postfix) with ESMTP id 6FB7BC9D2 for ; Tue, 3 Nov 2020 11:07:26 +0100 (CET) X-QQ-mid: bizesmtp26t1604398043t0nbknsp Received: from localhost.localdomain.com (unknown [183.129.236.74]) by esmtp10.qq.com (ESMTP) with id ; Tue, 03 Nov 2020 18:07:22 +0800 (CST) X-QQ-SSF: 01400000002000C0C000B00A0000000 X-QQ-FEAT: Oux8rCUi2/hcBOyDaitxgXC7XebLZo1QVnBsJCsiuntZV9S4pgstklApcHI+k jslx16CYswO+PI+ZCM/v9pzDDSrQBPr41DbhBT8ACmLs6Yuc4x7fLocxVK1nrGQn2g1dcZa ozliYv0PQhAPA/BZZx1EhLXEw1o6iRopXRdn6YwhrRSt2TEY7ARXyRCl/unSqAMxjaihY1g tjzU797GURsDAJNyYr0eM2Y/xIOEL9Z51qJxfQh4v/fEhyyxa9J8Wztftg7ltjUheUW3ebK tMeAn3E54jNbLkoCvTJAOlxjrP+ajnK8IoRNCX0IWU+ys44Swc/HSh7x6xHBGcJ29Z6YlWj zzpuSJyFAzZu6fqovvyNF8LBFsrdw== X-QQ-GoodBg: 2 From: Jiawen Wu To: dev@dpdk.org Cc: Jiawen Wu Date: Tue, 3 Nov 2020 18:08:06 +0800 Message-Id: <20201103100818.311881-26-jiawenwu@trustnetic.com> X-Mailer: git-send-email 2.18.4 In-Reply-To: <20201103100818.311881-1-jiawenwu@trustnetic.com> References: <20201103100818.311881-1-jiawenwu@trustnetic.com> X-QQ-SENDSIZE: 520 Feedback-ID: bizesmtp:trustnetic.com:qybgforeign:qybgforeign7 X-QQ-Bgrelay: 1 Subject: [dpdk-dev] [PATCH 25/37] net/txgbe: add flow API flush function X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Add support to flush operation for flow API. 
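A minimal usage sketch (not part of this patch) of how an application could exercise the new callback through the generic flow API, assuming port_id refers to an already started txgbe port:

#include <stdio.h>
#include <rte_flow.h>

/* Hypothetical helper: remove every rte_flow rule installed on the port.
 * rte_flow_flush() dispatches to the PMD's flush callback for txgbe ports. */
static int
drop_all_rules(uint16_t port_id)
{
	struct rte_flow_error err;
	int ret;

	ret = rte_flow_flush(port_id, &err);
	if (ret)
		printf("flush failed on port %u: %s\n", port_id,
		       err.message ? err.message : "(no message)");
	return ret;
}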
Signed-off-by: Jiawen Wu --- drivers/net/txgbe/base/txgbe_hw.c | 87 +++++++++++++++++++++++++++++++ drivers/net/txgbe/base/txgbe_hw.h | 1 + drivers/net/txgbe/txgbe_ethdev.c | 21 ++++++++ drivers/net/txgbe/txgbe_ethdev.h | 2 + drivers/net/txgbe/txgbe_fdir.c | 47 +++++++++++++++++ drivers/net/txgbe/txgbe_flow.c | 43 +++++++++++++++ 6 files changed, 201 insertions(+) diff --git a/drivers/net/txgbe/base/txgbe_hw.c b/drivers/net/txgbe/base/txgbe_hw.c index 5ee13b0f8..dc419d7d4 100644 --- a/drivers/net/txgbe/base/txgbe_hw.c +++ b/drivers/net/txgbe/base/txgbe_hw.c @@ -3649,6 +3649,93 @@ s32 txgbe_reset_hw(struct txgbe_hw *hw) return status; } +/** + * txgbe_fdir_check_cmd_complete - poll to check whether FDIRPICMD is complete + * @hw: pointer to hardware structure + * @fdircmd: current value of FDIRCMD register + */ +static s32 txgbe_fdir_check_cmd_complete(struct txgbe_hw *hw, u32 *fdircmd) +{ + int i; + + for (i = 0; i < TXGBE_FDIRCMD_CMD_POLL; i++) { + *fdircmd = rd32(hw, TXGBE_FDIRPICMD); + if (!(*fdircmd & TXGBE_FDIRPICMD_OP_MASK)) + return 0; + usec_delay(10); + } + + return TXGBE_ERR_FDIR_CMD_INCOMPLETE; +} + +/** + * txgbe_reinit_fdir_tables - Reinitialize Flow Director tables. + * @hw: pointer to hardware structure + **/ +s32 txgbe_reinit_fdir_tables(struct txgbe_hw *hw) +{ + s32 err; + int i; + u32 fdirctrl = rd32(hw, TXGBE_FDIRCTL); + u32 fdircmd; + fdirctrl &= ~TXGBE_FDIRCTL_INITDONE; + + DEBUGFUNC("txgbe_reinit_fdir_tables"); + + /* + * Before starting reinitialization process, + * FDIRPICMD.OP must be zero. + */ + err = txgbe_fdir_check_cmd_complete(hw, &fdircmd); + if (err) { + DEBUGOUT("Flow Director previous command did not complete, aborting table re-initialization.\n"); + return err; + } + + wr32(hw, TXGBE_FDIRFREE, 0); + txgbe_flush(hw); + /* + * adapters flow director init flow cannot be restarted, + * Workaround silicon errata by performing the following steps + * before re-writing the FDIRCTL control register with the same value. + * - write 1 to bit 8 of FDIRPICMD register & + * - write 0 to bit 8 of FDIRPICMD register + */ + wr32m(hw, TXGBE_FDIRPICMD, TXGBE_FDIRPICMD_CLR, TXGBE_FDIRPICMD_CLR); + txgbe_flush(hw); + wr32m(hw, TXGBE_FDIRPICMD, TXGBE_FDIRPICMD_CLR, 0); + txgbe_flush(hw); + /* + * Clear FDIR Hash register to clear any leftover hashes + * waiting to be programmed. 
+ */ + wr32(hw, TXGBE_FDIRPIHASH, 0x00); + txgbe_flush(hw); + + wr32(hw, TXGBE_FDIRCTL, fdirctrl); + txgbe_flush(hw); + + /* Poll init-done after we write FDIRCTL register */ + for (i = 0; i < TXGBE_FDIR_INIT_DONE_POLL; i++) { + if (rd32m(hw, TXGBE_FDIRCTL, TXGBE_FDIRCTL_INITDONE)) + break; + msec_delay(1); + } + if (i >= TXGBE_FDIR_INIT_DONE_POLL) { + DEBUGOUT("Flow Director Signature poll time exceeded!\n"); + return TXGBE_ERR_FDIR_REINIT_FAILED; + } + + /* Clear FDIR statistics registers (read to clear) */ + rd32(hw, TXGBE_FDIRUSED); + rd32(hw, TXGBE_FDIRFAIL); + rd32(hw, TXGBE_FDIRMATCH); + rd32(hw, TXGBE_FDIRMISS); + rd32(hw, TXGBE_FDIRLEN); + + return 0; +} + /** * txgbe_start_hw_raptor - Prepare hardware for Tx/Rx * @hw: pointer to hardware structure diff --git a/drivers/net/txgbe/base/txgbe_hw.h b/drivers/net/txgbe/base/txgbe_hw.h index 09298ea0c..a7473e7e5 100644 --- a/drivers/net/txgbe/base/txgbe_hw.h +++ b/drivers/net/txgbe/base/txgbe_hw.h @@ -108,5 +108,6 @@ s32 txgbe_init_phy_raptor(struct txgbe_hw *hw); s32 txgbe_enable_rx_dma_raptor(struct txgbe_hw *hw, u32 regval); s32 txgbe_prot_autoc_read_raptor(struct txgbe_hw *hw, bool *locked, u64 *value); s32 txgbe_prot_autoc_write_raptor(struct txgbe_hw *hw, bool locked, u64 value); +s32 txgbe_reinit_fdir_tables(struct txgbe_hw *hw); bool txgbe_verify_lesm_fw_enabled_raptor(struct txgbe_hw *hw); #endif /* _TXGBE_HW_H_ */ diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c index cc061e0d6..83c078a2a 100644 --- a/drivers/net/txgbe/txgbe_ethdev.c +++ b/drivers/net/txgbe/txgbe_ethdev.c @@ -5186,6 +5186,27 @@ txgbe_clear_syn_filter(struct rte_eth_dev *dev) } } +/* remove all the L2 tunnel filters */ +int +txgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev) +{ + struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev); + struct txgbe_l2_tn_filter *l2_tn_filter; + struct txgbe_l2_tunnel_conf l2_tn_conf; + int ret = 0; + + while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) { + l2_tn_conf.l2_tunnel_type = l2_tn_filter->key.l2_tn_type; + l2_tn_conf.tunnel_id = l2_tn_filter->key.tn_id; + l2_tn_conf.pool = l2_tn_filter->pool; + ret = txgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_conf); + if (ret < 0) + return ret; + } + + return 0; +} + static const struct eth_dev_ops txgbe_eth_dev_ops = { .dev_configure = txgbe_dev_configure, .dev_infos_get = txgbe_dev_info_get, diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h index f439a201b..1df74ab9b 100644 --- a/drivers/net/txgbe/txgbe_ethdev.h +++ b/drivers/net/txgbe/txgbe_ethdev.h @@ -488,12 +488,14 @@ int txgbe_pf_host_configure(struct rte_eth_dev *eth_dev); uint32_t txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val); void txgbe_fdir_filter_restore(struct rte_eth_dev *dev); +int txgbe_clear_all_fdir_filter(struct rte_eth_dev *dev); extern const struct rte_flow_ops txgbe_flow_ops; void txgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev); void txgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev); void txgbe_clear_syn_filter(struct rte_eth_dev *dev); +int txgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev); int txgbe_vt_check(struct txgbe_hw *hw); int txgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf, diff --git a/drivers/net/txgbe/txgbe_fdir.c b/drivers/net/txgbe/txgbe_fdir.c index 2342cf681..6ddb023c0 100644 --- a/drivers/net/txgbe/txgbe_fdir.c +++ b/drivers/net/txgbe/txgbe_fdir.c @@ -902,6 +902,27 @@ txgbe_fdir_filter_program(struct rte_eth_dev *dev, return err; } +static int 
+txgbe_fdir_flush(struct rte_eth_dev *dev) +{ + struct txgbe_hw *hw = TXGBE_DEV_HW(dev); + struct txgbe_hw_fdir_info *info = TXGBE_DEV_FDIR(dev); + int ret; + + ret = txgbe_reinit_fdir_tables(hw); + if (ret < 0) { + PMD_INIT_LOG(ERR, "Failed to re-initialize FD table."); + return ret; + } + + info->f_add = 0; + info->f_remove = 0; + info->add = 0; + info->remove = 0; + + return ret; +} + /* restore flow director filter */ void txgbe_fdir_filter_restore(struct rte_eth_dev *dev) @@ -936,3 +957,29 @@ txgbe_fdir_filter_restore(struct rte_eth_dev *dev) } } +/* remove all the flow director filters */ +int +txgbe_clear_all_fdir_filter(struct rte_eth_dev *dev) +{ + struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(dev); + struct txgbe_fdir_filter *fdir_filter; + struct txgbe_fdir_filter *filter_flag; + int ret = 0; + + /* flush flow director */ + rte_hash_reset(fdir_info->hash_handle); + memset(fdir_info->hash_map, 0, + sizeof(struct txgbe_fdir_filter *) * TXGBE_MAX_FDIR_FILTER_NUM); + filter_flag = TAILQ_FIRST(&fdir_info->fdir_list); + while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) { + TAILQ_REMOVE(&fdir_info->fdir_list, + fdir_filter, + entries); + rte_free(fdir_filter); + } + + if (filter_flag != NULL) + ret = txgbe_fdir_flush(dev); + + return ret; +} diff --git a/drivers/net/txgbe/txgbe_flow.c b/drivers/net/txgbe/txgbe_flow.c index 8d5362175..b5f4073e2 100644 --- a/drivers/net/txgbe/txgbe_flow.c +++ b/drivers/net/txgbe/txgbe_flow.c @@ -2555,6 +2555,16 @@ txgbe_parse_rss_filter(struct rte_eth_dev *dev, return 0; } +/* remove the rss filter */ +static void +txgbe_clear_rss_filter(struct rte_eth_dev *dev) +{ + struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev); + + if (filter_info->rss_info.conf.queue_num) + txgbe_config_rss_filter(dev, &filter_info->rss_info, FALSE); +} + void txgbe_filterlist_init(void) { @@ -3069,9 +3079,42 @@ txgbe_flow_destroy(struct rte_eth_dev *dev, return ret; } +/* Destroy all flow rules associated with a port on txgbe. 
*/ +static int +txgbe_flow_flush(struct rte_eth_dev *dev, + struct rte_flow_error *error) +{ + int ret = 0; + + txgbe_clear_all_ntuple_filter(dev); + txgbe_clear_all_ethertype_filter(dev); + txgbe_clear_syn_filter(dev); + + ret = txgbe_clear_all_fdir_filter(dev); + if (ret < 0) { + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "Failed to flush rule"); + return ret; + } + + ret = txgbe_clear_all_l2_tn_filter(dev); + if (ret < 0) { + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "Failed to flush rule"); + return ret; + } + + txgbe_clear_rss_filter(dev); + + txgbe_filterlist_flush(); + + return 0; +} + const struct rte_flow_ops txgbe_flow_ops = { .validate = txgbe_flow_validate, .create = txgbe_flow_create, .destroy = txgbe_flow_destroy, + .flush = txgbe_flow_flush, }; From patchwork Tue Nov 3 10:08:07 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiawen Wu X-Patchwork-Id: 83547 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 131B1A0521; Tue, 3 Nov 2020 11:16:36 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id BF8CCCA59; Tue, 3 Nov 2020 11:07:45 +0100 (CET) Received: from smtpbgeu1.qq.com (smtpbgeu1.qq.com [52.59.177.22]) by dpdk.org (Postfix) with ESMTP id CD393C8C4 for ; Tue, 3 Nov 2020 11:07:28 +0100 (CET) X-QQ-mid: bizesmtp26t1604398044tmsnswqq Received: from localhost.localdomain.com (unknown [183.129.236.74]) by esmtp10.qq.com (ESMTP) with id ; Tue, 03 Nov 2020 18:07:23 +0800 (CST) X-QQ-SSF: 01400000002000C0C000B00A0000000 X-QQ-FEAT: 4ufqJHNJpU0K/dDFGjORXKEr4vz8+ADM/NSH9Lq4pKETFF58xwiatu/UhSr6U yWVtIZukDyzx0U8RHjuIcpzD4s8kFGtwQAWrMx4T/hoLVHUrhOmvFMNjeIMde8tp2M3E/KR T2cvNZCGEn4y/L/93/0dPGgzpjZm7WU2nzeygojvn+dP6GNC355ZXczIFmBBPua8jQw/DGC 9VV3OM6G2EZmbEuilIuB8tvS3Rgqp2CH9RDf2ZX7GaLsikaaUdFkC2mv2714gOfz3V1mAwA iq53UUDZwyKGpmX6euNXqy582iRLNScS95M6IRf8jTAu7qMwEasN2kihF0anKtxHie4aGyq dBMpJhZnSi79UOuOTg6cscm5+26Og== X-QQ-GoodBg: 2 From: Jiawen Wu To: dev@dpdk.org Cc: Jiawen Wu Date: Tue, 3 Nov 2020 18:08:07 +0800 Message-Id: <20201103100818.311881-27-jiawenwu@trustnetic.com> X-Mailer: git-send-email 2.18.4 In-Reply-To: <20201103100818.311881-1-jiawenwu@trustnetic.com> References: <20201103100818.311881-1-jiawenwu@trustnetic.com> X-QQ-SENDSIZE: 520 Feedback-ID: bizesmtp:trustnetic.com:qybgforeign:qybgforeign6 X-QQ-Bgrelay: 1 Subject: [dpdk-dev] [PATCH 26/37] net/txgbe: support UDP tunnel port add and delete X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Support UDP tunnel port add and delete operations. 
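As an illustration only (not part of this patch), an application would typically drive these new dev_ops through the ethdev API, for example to register and later remove the IANA VXLAN port 4789:

#include <rte_ethdev.h>

/* Hypothetical sketch: make the NIC recognize UDP port 4789 as VXLAN,
 * then unregister it again. Port number and tunnel type are examples. */
static int
toggle_vxlan_port(uint16_t port_id)
{
	struct rte_eth_udp_tunnel tunnel = {
		.udp_port = 4789,
		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
	};
	int ret;

	ret = rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
	if (ret)
		return ret;
	/* ... traffic runs with the tunnel port programmed ... */
	return rte_eth_dev_udp_tunnel_port_delete(port_id, &tunnel);
}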
Signed-off-by: Jiawen Wu --- drivers/net/txgbe/txgbe_ethdev.c | 105 +++++++++++++++++++++++++++++++ 1 file changed, 105 insertions(+) diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c index 83c078a2a..337ebf2e0 100644 --- a/drivers/net/txgbe/txgbe_ethdev.c +++ b/drivers/net/txgbe/txgbe_ethdev.c @@ -5038,6 +5038,109 @@ txgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en) return ret; } +/* Add UDP tunneling port */ +static int +txgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, + struct rte_eth_udp_tunnel *udp_tunnel) +{ + struct txgbe_hw *hw = TXGBE_DEV_HW(dev); + int ret = 0; + + if (udp_tunnel == NULL) + return -EINVAL; + + switch (udp_tunnel->prot_type) { + case RTE_TUNNEL_TYPE_VXLAN: + if (udp_tunnel->udp_port == 0) { + PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed."); + ret = -EINVAL; + break; + } + wr32(hw, TXGBE_VXLANPORT, udp_tunnel->udp_port); + wr32(hw, TXGBE_VXLANPORTGPE, udp_tunnel->udp_port); + break; + case RTE_TUNNEL_TYPE_GENEVE: + if (udp_tunnel->udp_port == 0) { + PMD_DRV_LOG(ERR, "Add Geneve port 0 is not allowed."); + ret = -EINVAL; + break; + } + wr32(hw, TXGBE_GENEVEPORT, udp_tunnel->udp_port); + break; + case RTE_TUNNEL_TYPE_TEREDO: + if (udp_tunnel->udp_port == 0) { + PMD_DRV_LOG(ERR, "Add Teredo port 0 is not allowed."); + ret = -EINVAL; + break; + } + wr32(hw, TXGBE_TEREDOPORT, udp_tunnel->udp_port); + break; + default: + PMD_DRV_LOG(ERR, "Invalid tunnel type"); + ret = -EINVAL; + break; + } + + txgbe_flush(hw); + + return ret; +} + +/* Remove UDP tunneling port */ +static int +txgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, + struct rte_eth_udp_tunnel *udp_tunnel) +{ + struct txgbe_hw *hw = TXGBE_DEV_HW(dev); + int ret = 0; + uint16_t cur_port; + + if (udp_tunnel == NULL) + return -EINVAL; + + switch (udp_tunnel->prot_type) { + case RTE_TUNNEL_TYPE_VXLAN: + cur_port = (uint16_t)rd32(hw, TXGBE_VXLANPORT); + if (cur_port != udp_tunnel->udp_port) { + PMD_DRV_LOG(ERR, "Port %u does not exist.", + udp_tunnel->udp_port); + ret = -EINVAL; + break; + } + wr32(hw, TXGBE_VXLANPORT, 0); + wr32(hw, TXGBE_VXLANPORTGPE, 0); + break; + case RTE_TUNNEL_TYPE_GENEVE: + cur_port = (uint16_t)rd32(hw, TXGBE_GENEVEPORT); + if (cur_port != udp_tunnel->udp_port) { + PMD_DRV_LOG(ERR, "Port %u does not exist.", + udp_tunnel->udp_port); + ret = -EINVAL; + break; + } + wr32(hw, TXGBE_GENEVEPORT, 0); + break; + case RTE_TUNNEL_TYPE_TEREDO: + cur_port = (uint16_t)rd32(hw, TXGBE_TEREDOPORT); + if (cur_port != udp_tunnel->udp_port) { + PMD_DRV_LOG(ERR, "Port %u does not exist.", + udp_tunnel->udp_port); + ret = -EINVAL; + break; + } + wr32(hw, TXGBE_TEREDOPORT, 0); + break; + default: + PMD_DRV_LOG(ERR, "Invalid tunnel type"); + ret = -EINVAL; + break; + } + + txgbe_flush(hw); + + return ret; +} + /* restore n-tuple filter */ static inline void txgbe_ntuple_filter_restore(struct rte_eth_dev *dev) @@ -5281,6 +5384,8 @@ static const struct eth_dev_ops txgbe_eth_dev_ops = { .timesync_adjust_time = txgbe_timesync_adjust_time, .timesync_read_time = txgbe_timesync_read_time, .timesync_write_time = txgbe_timesync_write_time, + .udp_tunnel_port_add = txgbe_dev_udp_tunnel_port_add, + .udp_tunnel_port_del = txgbe_dev_udp_tunnel_port_del, .tx_done_cleanup = txgbe_dev_tx_done_cleanup, }; From patchwork Tue Nov 3 10:08:08 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiawen Wu X-Patchwork-Id: 83549 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: 
patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 9F2BFA0521; Tue, 3 Nov 2020 11:17:21 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id E9E7ACA89; Tue, 3 Nov 2020 11:07:48 +0100 (CET) Received: from smtpbg506.qq.com (smtpbg506.qq.com [203.205.250.33]) by dpdk.org (Postfix) with ESMTP id B5D88C9EE for ; Tue, 3 Nov 2020 11:07:30 +0100 (CET) X-QQ-mid: bizesmtp26t1604398045tmwbbduo Received: from localhost.localdomain.com (unknown [183.129.236.74]) by esmtp10.qq.com (ESMTP) with id ; Tue, 03 Nov 2020 18:07:25 +0800 (CST) X-QQ-SSF: 01400000002000C0C000B00A0000000 X-QQ-FEAT: VP6n2TeHyWUCdYDOfYSxdE31BnLa7Nv1Ta7OJF8u8ggKLIusl0ZCBQJxfiGpK XSvfNLTMBNUM1ZiG1wB0MxSdTZDfYV950CiNJtK98zY9afmCKDPO+5eLo0E1NDHdngb0HBj coVmH9ESIV9wbPoeakuk3yHQD6tD3Lh/7NrrPlgLpWuKFR2tWusWpRN4RcKNUcO4Mo7Gnyh J//K7qVI2LUh5yW/9k1J+dMBFfyLBo8cN4CYGdaTrBNDD7iAyK3UcYdZ1Y1jgqk3quDbv9r dwxIIP8NZ9WhrLDCXS6v+f33yiA+zjIyQ+fDonGO73tUkztKbXySQgHQscgWzBsVyJFB1SN tUvl3Qkr+SaGLKV8d9gsWMSCOrQ5Sy3uCWMH3YE X-QQ-GoodBg: 2 From: Jiawen Wu To: dev@dpdk.org Cc: Jiawen Wu Date: Tue, 3 Nov 2020 18:08:08 +0800 Message-Id: <20201103100818.311881-28-jiawenwu@trustnetic.com> X-Mailer: git-send-email 2.18.4 In-Reply-To: <20201103100818.311881-1-jiawenwu@trustnetic.com> References: <20201103100818.311881-1-jiawenwu@trustnetic.com> X-QQ-SENDSIZE: 520 Feedback-ID: bizesmtp:trustnetic.com:qybgforeign:qybgforeign5 X-QQ-Bgrelay: 1 Subject: [dpdk-dev] [PATCH 27/37] net/txgbe: add TM configuration init and uninit X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Add traffic manager configuration init and uninit operations. 
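For orientation (this sketch is not part of the patch), the two helpers are meant to bracket the lifetime of a port, mirroring how the diff below wires them into device init and close; error handling and the rest of the per-port setup are elided:

/* Sketch of the intended pairing, assuming a txgbe ethdev instance. */
static int
example_port_lifecycle(struct rte_eth_dev *eth_dev)
{
	/* at init time: empty shaper/TC/queue lists, committed = false */
	txgbe_tm_conf_init(eth_dev);

	/* ... port runs, an rte_tm hierarchy may be built and committed ... */

	/* at close time: free all remaining nodes and shaper profiles */
	txgbe_tm_conf_uninit(eth_dev);
	return 0;
}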
Signed-off-by: Jiawen Wu --- drivers/net/txgbe/meson.build | 1 + drivers/net/txgbe/txgbe_ethdev.c | 16 +++++++++ drivers/net/txgbe/txgbe_ethdev.h | 60 ++++++++++++++++++++++++++++++++ drivers/net/txgbe/txgbe_tm.c | 57 ++++++++++++++++++++++++++++++ 4 files changed, 134 insertions(+) create mode 100644 drivers/net/txgbe/txgbe_tm.c diff --git a/drivers/net/txgbe/meson.build b/drivers/net/txgbe/meson.build index bb1683631..352baad8b 100644 --- a/drivers/net/txgbe/meson.build +++ b/drivers/net/txgbe/meson.build @@ -11,6 +11,7 @@ sources = files( 'txgbe_ptypes.c', 'txgbe_pf.c', 'txgbe_rxtx.c', + 'txgbe_tm.c', ) deps += ['hash'] diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c index 337ebf2e0..2b73abae6 100644 --- a/drivers/net/txgbe/txgbe_ethdev.c +++ b/drivers/net/txgbe/txgbe_ethdev.c @@ -704,6 +704,9 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) /* initialize bandwidth configuration info */ memset(bw_conf, 0, sizeof(struct txgbe_bw_conf)); + /* initialize Traffic Manager configuration */ + txgbe_tm_conf_init(eth_dev); + return 0; } @@ -1545,6 +1548,7 @@ txgbe_dev_start(struct rte_eth_dev *dev) int status; uint16_t vf, idx; uint32_t *link_speeds; + struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev); PMD_INIT_FUNC_TRACE(); @@ -1748,6 +1752,11 @@ txgbe_dev_start(struct rte_eth_dev *dev) txgbe_l2_tunnel_conf(dev); txgbe_filter_restore(dev); + if (tm_conf->root && !tm_conf->committed) + PMD_DRV_LOG(WARNING, + "please call hierarchy_commit() " + "before starting the port"); + /* * Update link status right before return, because it may * start link configuration process in a separate thread. @@ -1780,6 +1789,7 @@ txgbe_dev_stop(struct rte_eth_dev *dev) struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; int vf; + struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev); if (hw->adapter_stopped) return 0; @@ -1832,6 +1842,9 @@ txgbe_dev_stop(struct rte_eth_dev *dev) intr_handle->intr_vec = NULL; } + /* reset hierarchy commit */ + tm_conf->committed = false; + adapter->rss_reta_updated = 0; wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_SEL_MASK); @@ -1947,6 +1960,9 @@ txgbe_dev_close(struct rte_eth_dev *dev) /* clear all the filters list */ txgbe_filterlist_flush(); + /* Remove all Traffic Manager configuration */ + txgbe_tm_conf_uninit(dev); + return ret; } diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h index 1df74ab9b..0f1a39918 100644 --- a/drivers/net/txgbe/txgbe_ethdev.h +++ b/drivers/net/txgbe/txgbe_ethdev.h @@ -14,6 +14,7 @@ #include #include #include +#include /* need update link, bit flag */ #define TXGBE_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0) @@ -273,6 +274,60 @@ struct txgbe_bw_conf { uint8_t tc_num; /* Number of TCs. */ }; +/* Struct to store Traffic Manager shaper profile. */ +struct txgbe_tm_shaper_profile { + TAILQ_ENTRY(txgbe_tm_shaper_profile) node; + uint32_t shaper_profile_id; + uint32_t reference_count; + struct rte_tm_shaper_params profile; +}; + +TAILQ_HEAD(txgbe_shaper_profile_list, txgbe_tm_shaper_profile); + +/* Struct to store Traffic Manager node configuration. 
*/ +struct txgbe_tm_node { + TAILQ_ENTRY(txgbe_tm_node) node; + uint32_t id; + uint32_t priority; + uint32_t weight; + uint32_t reference_count; + uint16_t no; + struct txgbe_tm_node *parent; + struct txgbe_tm_shaper_profile *shaper_profile; + struct rte_tm_node_params params; +}; + +TAILQ_HEAD(txgbe_tm_node_list, txgbe_tm_node); + +/* The configuration of Traffic Manager */ +struct txgbe_tm_conf { + struct txgbe_shaper_profile_list shaper_profile_list; + struct txgbe_tm_node *root; /* root node - port */ + struct txgbe_tm_node_list tc_list; /* node list for all the TCs */ + struct txgbe_tm_node_list queue_list; /* node list for all the queues */ + /** + * The number of added TC nodes. + * It should be no more than the TC number of this port. + */ + uint32_t nb_tc_node; + /** + * The number of added queue nodes. + * It should be no more than the queue number of this port. + */ + uint32_t nb_queue_node; + /** + * This flag is used to check if APP can change the TM node + * configuration. + * When it's true, means the configuration is applied to HW, + * APP should not change the configuration. + * As we don't support on-the-fly configuration, when starting + * the port, APP should call the hierarchy_commit API to set this + * flag to true. When stopping the port, this flag should be set + * to false. + */ + bool committed; +}; + /* * Structure to store private data for each driver instance (for each port). */ @@ -295,6 +350,7 @@ struct txgbe_adapter { struct rte_timecounter systime_tc; struct rte_timecounter rx_tstamp_tc; struct rte_timecounter tx_tstamp_tc; + struct txgbe_tm_conf tm_conf; /* For RSS reta table update */ uint8_t rss_reta_updated; @@ -345,6 +401,8 @@ struct txgbe_adapter { #define TXGBE_DEV_BW_CONF(dev) \ (&((struct txgbe_adapter *)(dev)->data->dev_private)->bw_conf) +#define TXGBE_DEV_TM_CONF(dev) \ + (&((struct txgbe_adapter *)(dev)->data->dev_private)->tm_conf) /* * RX/TX function prototypes @@ -500,6 +558,8 @@ int txgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev); int txgbe_vt_check(struct txgbe_hw *hw); int txgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf, uint16_t tx_rate, uint64_t q_msk); +void txgbe_tm_conf_init(struct rte_eth_dev *dev); +void txgbe_tm_conf_uninit(struct rte_eth_dev *dev); int txgbe_set_queue_rate_limit(struct rte_eth_dev *dev, uint16_t queue_idx, uint16_t tx_rate); int txgbe_rss_conf_init(struct txgbe_rte_flow_rss_conf *out, diff --git a/drivers/net/txgbe/txgbe_tm.c b/drivers/net/txgbe/txgbe_tm.c new file mode 100644 index 000000000..78f426964 --- /dev/null +++ b/drivers/net/txgbe/txgbe_tm.c @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2015-2020 + */ + +#include + +#include "txgbe_ethdev.h" + +void +txgbe_tm_conf_init(struct rte_eth_dev *dev) +{ + struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev); + + /* initialize shaper profile list */ + TAILQ_INIT(&tm_conf->shaper_profile_list); + + /* initialize node configuration */ + tm_conf->root = NULL; + TAILQ_INIT(&tm_conf->queue_list); + TAILQ_INIT(&tm_conf->tc_list); + tm_conf->nb_tc_node = 0; + tm_conf->nb_queue_node = 0; + tm_conf->committed = false; +} + +void +txgbe_tm_conf_uninit(struct rte_eth_dev *dev) +{ + struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev); + struct txgbe_tm_shaper_profile *shaper_profile; + struct txgbe_tm_node *tm_node; + + /* clear node configuration */ + while ((tm_node = TAILQ_FIRST(&tm_conf->queue_list))) { + TAILQ_REMOVE(&tm_conf->queue_list, tm_node, node); + rte_free(tm_node); + } + tm_conf->nb_queue_node = 0; + 
while ((tm_node = TAILQ_FIRST(&tm_conf->tc_list))) { + TAILQ_REMOVE(&tm_conf->tc_list, tm_node, node); + rte_free(tm_node); + } + tm_conf->nb_tc_node = 0; + if (tm_conf->root) { + rte_free(tm_conf->root); + tm_conf->root = NULL; + } + + /* Remove all shaper profiles */ + while ((shaper_profile = + TAILQ_FIRST(&tm_conf->shaper_profile_list))) { + TAILQ_REMOVE(&tm_conf->shaper_profile_list, + shaper_profile, node); + rte_free(shaper_profile); + } +} + From patchwork Tue Nov 3 10:08:09 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiawen Wu X-Patchwork-Id: 83548 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 40B90A0521; Tue, 3 Nov 2020 11:16:57 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 2175ECA62; Tue, 3 Nov 2020 11:07:47 +0100 (CET) Received: from smtpbgeu1.qq.com (smtpbgeu1.qq.com [52.59.177.22]) by dpdk.org (Postfix) with ESMTP id 674C0C9EA for ; Tue, 3 Nov 2020 11:07:30 +0100 (CET) X-QQ-mid: bizesmtp26t1604398046t647dky2 Received: from localhost.localdomain.com (unknown [183.129.236.74]) by esmtp10.qq.com (ESMTP) with id ; Tue, 03 Nov 2020 18:07:26 +0800 (CST) X-QQ-SSF: 01400000002000C0C000B00A0000000 X-QQ-FEAT: y3iK4Lsvf4Dck9wEvRvp4DjPKaV+2IURc/IhruuJDpjScCE6k0SWcwmhrJG1G 33g2XGz6YCnm1NZ1iWcvougyUsFxLUDLnYqc/oq3yD/O4mhyXBeUohOt9a8itfM8bygGOPS rm+z52mc2+Ros9CNe3VtDBMcwe3jEDF0nilviaDb3gKam8frBxzIE7lgEeez9yTbqWBKAyr +2l705z9aK58MsYQw3trCc1rQ+EbKswA2JapkG+iKATbFFDsj7Ia70CLTiGoEQNAuOTalO0 l5QZNozuAENqfT5G61h68qlBzb9/BHrH3aN2PAsGU+FFp2Ah4iHuhQ8Msz2KmisnuV9uf5E BDU/26QjqLlgahnJ9WQcdp3hfW0dA== X-QQ-GoodBg: 2 From: Jiawen Wu To: dev@dpdk.org Cc: Jiawen Wu Date: Tue, 3 Nov 2020 18:08:09 +0800 Message-Id: <20201103100818.311881-29-jiawenwu@trustnetic.com> X-Mailer: git-send-email 2.18.4 In-Reply-To: <20201103100818.311881-1-jiawenwu@trustnetic.com> References: <20201103100818.311881-1-jiawenwu@trustnetic.com> X-QQ-SENDSIZE: 520 Feedback-ID: bizesmtp:trustnetic.com:qybgforeign:qybgforeign5 X-QQ-Bgrelay: 1 Subject: [dpdk-dev] [PATCH 28/37] net/txgbe: add TM capabilities get operation X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Add support to get traffic manager capabilities. 
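A minimal application-side sketch (not part of this patch) of querying the advertised capabilities through the generic rte_tm API; the fields printed are examples only:

#include <inttypes.h>
#include <stdio.h>
#include <string.h>
#include <rte_tm.h>

/* Hypothetical helper: dump a few of the capabilities this patch reports. */
static int
print_tm_caps(uint16_t port_id)
{
	struct rte_tm_capabilities cap;
	struct rte_tm_error err;
	int ret;

	memset(&cap, 0, sizeof(cap));
	ret = rte_tm_capabilities_get(port_id, &cap, &err);
	if (ret)
		return ret;
	printf("levels=%u nodes=%u private shaper max rate=%" PRIu64 " Bps\n",
	       cap.n_levels_max, cap.n_nodes_max,
	       cap.shaper_private_rate_max);
	return 0;
}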
Signed-off-by: Jiawen Wu --- drivers/net/txgbe/txgbe_ethdev.c | 1 + drivers/net/txgbe/txgbe_ethdev.h | 9 + drivers/net/txgbe/txgbe_tm.c | 278 +++++++++++++++++++++++++++++++ 3 files changed, 288 insertions(+) diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c index 2b73abae6..80ea1038c 100644 --- a/drivers/net/txgbe/txgbe_ethdev.c +++ b/drivers/net/txgbe/txgbe_ethdev.c @@ -5402,6 +5402,7 @@ static const struct eth_dev_ops txgbe_eth_dev_ops = { .timesync_write_time = txgbe_timesync_write_time, .udp_tunnel_port_add = txgbe_dev_udp_tunnel_port_add, .udp_tunnel_port_del = txgbe_dev_udp_tunnel_port_del, + .tm_ops_get = txgbe_tm_ops_get, .tx_done_cleanup = txgbe_dev_tx_done_cleanup, }; diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h index 0f1a39918..cf377809e 100644 --- a/drivers/net/txgbe/txgbe_ethdev.h +++ b/drivers/net/txgbe/txgbe_ethdev.h @@ -284,6 +284,14 @@ struct txgbe_tm_shaper_profile { TAILQ_HEAD(txgbe_shaper_profile_list, txgbe_tm_shaper_profile); +/* node type of Traffic Manager */ +enum txgbe_tm_node_type { + TXGBE_TM_NODE_TYPE_PORT, + TXGBE_TM_NODE_TYPE_TC, + TXGBE_TM_NODE_TYPE_QUEUE, + TXGBE_TM_NODE_TYPE_MAX, +}; + /* Struct to store Traffic Manager node configuration. */ struct txgbe_tm_node { TAILQ_ENTRY(txgbe_tm_node) node; @@ -558,6 +566,7 @@ int txgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev); int txgbe_vt_check(struct txgbe_hw *hw); int txgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf, uint16_t tx_rate, uint64_t q_msk); +int txgbe_tm_ops_get(struct rte_eth_dev *dev, void *ops); void txgbe_tm_conf_init(struct rte_eth_dev *dev); void txgbe_tm_conf_uninit(struct rte_eth_dev *dev); int txgbe_set_queue_rate_limit(struct rte_eth_dev *dev, uint16_t queue_idx, diff --git a/drivers/net/txgbe/txgbe_tm.c b/drivers/net/txgbe/txgbe_tm.c index 78f426964..545590ba2 100644 --- a/drivers/net/txgbe/txgbe_tm.c +++ b/drivers/net/txgbe/txgbe_tm.c @@ -6,6 +6,36 @@ #include "txgbe_ethdev.h" +static int txgbe_tm_capabilities_get(struct rte_eth_dev *dev, + struct rte_tm_capabilities *cap, + struct rte_tm_error *error); +static int txgbe_level_capabilities_get(struct rte_eth_dev *dev, + uint32_t level_id, + struct rte_tm_level_capabilities *cap, + struct rte_tm_error *error); +static int txgbe_node_capabilities_get(struct rte_eth_dev *dev, + uint32_t node_id, + struct rte_tm_node_capabilities *cap, + struct rte_tm_error *error); + +const struct rte_tm_ops txgbe_tm_ops = { + .capabilities_get = txgbe_tm_capabilities_get, + .level_capabilities_get = txgbe_level_capabilities_get, + .node_capabilities_get = txgbe_node_capabilities_get, +}; + +int +txgbe_tm_ops_get(struct rte_eth_dev *dev __rte_unused, + void *arg) +{ + if (!arg) + return -EINVAL; + + *(const void **)arg = &txgbe_tm_ops; + + return 0; +} + void txgbe_tm_conf_init(struct rte_eth_dev *dev) { @@ -55,3 +85,251 @@ txgbe_tm_conf_uninit(struct rte_eth_dev *dev) } } +static inline uint8_t +txgbe_tc_nb_get(struct rte_eth_dev *dev) +{ + struct rte_eth_conf *eth_conf; + uint8_t nb_tcs = 0; + + eth_conf = &dev->data->dev_conf; + if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) { + nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs; + } else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) { + if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools == + ETH_32_POOLS) + nb_tcs = ETH_4_TCS; + else + nb_tcs = ETH_8_TCS; + } else { + nb_tcs = 1; + } + + return nb_tcs; +} + +static int +txgbe_tm_capabilities_get(struct rte_eth_dev *dev, + struct rte_tm_capabilities *cap, + 
struct rte_tm_error *error) +{ + struct txgbe_hw *hw = TXGBE_DEV_HW(dev); + uint8_t tc_nb = txgbe_tc_nb_get(dev); + + if (!cap || !error) + return -EINVAL; + + if (tc_nb > hw->mac.max_tx_queues) + return -EINVAL; + + error->type = RTE_TM_ERROR_TYPE_NONE; + + /* set all the parameters to 0 first. */ + memset(cap, 0, sizeof(struct rte_tm_capabilities)); + + /** + * here is the max capability not the current configuration. + */ + /* port + TCs + queues */ + cap->n_nodes_max = 1 + TXGBE_DCB_TC_MAX + + hw->mac.max_tx_queues; + cap->n_levels_max = 3; + cap->non_leaf_nodes_identical = 1; + cap->leaf_nodes_identical = 1; + cap->shaper_n_max = cap->n_nodes_max; + cap->shaper_private_n_max = cap->n_nodes_max; + cap->shaper_private_dual_rate_n_max = 0; + cap->shaper_private_rate_min = 0; + /* 10Gbps -> 1.25GBps */ + cap->shaper_private_rate_max = 1250000000ull; + cap->shaper_shared_n_max = 0; + cap->shaper_shared_n_nodes_per_shaper_max = 0; + cap->shaper_shared_n_shapers_per_node_max = 0; + cap->shaper_shared_dual_rate_n_max = 0; + cap->shaper_shared_rate_min = 0; + cap->shaper_shared_rate_max = 0; + cap->sched_n_children_max = hw->mac.max_tx_queues; + /** + * HW supports SP. But no plan to support it now. + * So, all the nodes should have the same priority. + */ + cap->sched_sp_n_priorities_max = 1; + cap->sched_wfq_n_children_per_group_max = 0; + cap->sched_wfq_n_groups_max = 0; + /** + * SW only supports fair round robin now. + * So, all the nodes should have the same weight. + */ + cap->sched_wfq_weight_max = 1; + cap->cman_head_drop_supported = 0; + cap->dynamic_update_mask = 0; + cap->shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD; + cap->shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS; + cap->cman_wred_context_n_max = 0; + cap->cman_wred_context_private_n_max = 0; + cap->cman_wred_context_shared_n_max = 0; + cap->cman_wred_context_shared_n_nodes_per_context_max = 0; + cap->cman_wred_context_shared_n_contexts_per_node_max = 0; + cap->stats_mask = 0; + + return 0; +} + +static inline struct txgbe_tm_node * +txgbe_tm_node_search(struct rte_eth_dev *dev, uint32_t node_id, + enum txgbe_tm_node_type *node_type) +{ + struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev); + struct txgbe_tm_node *tm_node; + + if (tm_conf->root && tm_conf->root->id == node_id) { + *node_type = TXGBE_TM_NODE_TYPE_PORT; + return tm_conf->root; + } + + TAILQ_FOREACH(tm_node, &tm_conf->tc_list, node) { + if (tm_node->id == node_id) { + *node_type = TXGBE_TM_NODE_TYPE_TC; + return tm_node; + } + } + + TAILQ_FOREACH(tm_node, &tm_conf->queue_list, node) { + if (tm_node->id == node_id) { + *node_type = TXGBE_TM_NODE_TYPE_QUEUE; + return tm_node; + } + } + + return NULL; +} + +static int +txgbe_level_capabilities_get(struct rte_eth_dev *dev, + uint32_t level_id, + struct rte_tm_level_capabilities *cap, + struct rte_tm_error *error) +{ + struct txgbe_hw *hw = TXGBE_DEV_HW(dev); + + if (!cap || !error) + return -EINVAL; + + if (level_id >= TXGBE_TM_NODE_TYPE_MAX) { + error->type = RTE_TM_ERROR_TYPE_LEVEL_ID; + error->message = "too deep level"; + return -EINVAL; + } + + /* root node */ + if (level_id == TXGBE_TM_NODE_TYPE_PORT) { + cap->n_nodes_max = 1; + cap->n_nodes_nonleaf_max = 1; + cap->n_nodes_leaf_max = 0; + } else if (level_id == TXGBE_TM_NODE_TYPE_TC) { + /* TC */ + cap->n_nodes_max = TXGBE_DCB_TC_MAX; + cap->n_nodes_nonleaf_max = TXGBE_DCB_TC_MAX; + cap->n_nodes_leaf_max = 0; + } else { + /* queue */ + cap->n_nodes_max = hw->mac.max_tx_queues; + cap->n_nodes_nonleaf_max = 0; + 
cap->n_nodes_leaf_max = hw->mac.max_tx_queues; + } + + cap->non_leaf_nodes_identical = true; + cap->leaf_nodes_identical = true; + + if (level_id != TXGBE_TM_NODE_TYPE_QUEUE) { + cap->nonleaf.shaper_private_supported = true; + cap->nonleaf.shaper_private_dual_rate_supported = false; + cap->nonleaf.shaper_private_rate_min = 0; + /* 10Gbps -> 1.25GBps */ + cap->nonleaf.shaper_private_rate_max = 1250000000ull; + cap->nonleaf.shaper_shared_n_max = 0; + if (level_id == TXGBE_TM_NODE_TYPE_PORT) + cap->nonleaf.sched_n_children_max = + TXGBE_DCB_TC_MAX; + else + cap->nonleaf.sched_n_children_max = + hw->mac.max_tx_queues; + cap->nonleaf.sched_sp_n_priorities_max = 1; + cap->nonleaf.sched_wfq_n_children_per_group_max = 0; + cap->nonleaf.sched_wfq_n_groups_max = 0; + cap->nonleaf.sched_wfq_weight_max = 1; + cap->nonleaf.stats_mask = 0; + + return 0; + } + + /* queue node */ + cap->leaf.shaper_private_supported = true; + cap->leaf.shaper_private_dual_rate_supported = false; + cap->leaf.shaper_private_rate_min = 0; + /* 10Gbps -> 1.25GBps */ + cap->leaf.shaper_private_rate_max = 1250000000ull; + cap->leaf.shaper_shared_n_max = 0; + cap->leaf.cman_head_drop_supported = false; + cap->leaf.cman_wred_context_private_supported = true; + cap->leaf.cman_wred_context_shared_n_max = 0; + cap->leaf.stats_mask = 0; + + return 0; +} + +static int +txgbe_node_capabilities_get(struct rte_eth_dev *dev, + uint32_t node_id, + struct rte_tm_node_capabilities *cap, + struct rte_tm_error *error) +{ + struct txgbe_hw *hw = TXGBE_DEV_HW(dev); + enum txgbe_tm_node_type node_type = TXGBE_TM_NODE_TYPE_MAX; + struct txgbe_tm_node *tm_node; + + if (!cap || !error) + return -EINVAL; + + if (node_id == RTE_TM_NODE_ID_NULL) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "invalid node id"; + return -EINVAL; + } + + /* check if the node id exists */ + tm_node = txgbe_tm_node_search(dev, node_id, &node_type); + if (!tm_node) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "no such node"; + return -EINVAL; + } + + cap->shaper_private_supported = true; + cap->shaper_private_dual_rate_supported = false; + cap->shaper_private_rate_min = 0; + /* 10Gbps -> 1.25GBps */ + cap->shaper_private_rate_max = 1250000000ull; + cap->shaper_shared_n_max = 0; + + if (node_type == TXGBE_TM_NODE_TYPE_QUEUE) { + cap->leaf.cman_head_drop_supported = false; + cap->leaf.cman_wred_context_private_supported = true; + cap->leaf.cman_wred_context_shared_n_max = 0; + } else { + if (node_type == TXGBE_TM_NODE_TYPE_PORT) + cap->nonleaf.sched_n_children_max = + TXGBE_DCB_TC_MAX; + else + cap->nonleaf.sched_n_children_max = + hw->mac.max_tx_queues; + cap->nonleaf.sched_sp_n_priorities_max = 1; + cap->nonleaf.sched_wfq_n_children_per_group_max = 0; + cap->nonleaf.sched_wfq_n_groups_max = 0; + cap->nonleaf.sched_wfq_weight_max = 1; + } + + cap->stats_mask = 0; + + return 0; +} + From patchwork Tue Nov 3 10:08:10 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiawen Wu X-Patchwork-Id: 83550 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 10F2EA0521; Tue, 3 Nov 2020 11:17:40 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 512F5CA95; Tue, 3 Nov 2020 11:07:50 +0100 (CET) Received: from smtpbgau1.qq.com (smtpbgau1.qq.com [54.206.16.166]) 
by dpdk.org (Postfix) with ESMTP id 50150CA02 for ; Tue, 3 Nov 2020 11:07:33 +0100 (CET) X-QQ-mid: bizesmtp26t1604398047txd2q956 Received: from localhost.localdomain.com (unknown [183.129.236.74]) by esmtp10.qq.com (ESMTP) with id ; Tue, 03 Nov 2020 18:07:27 +0800 (CST) X-QQ-SSF: 01400000002000C0C000B00A0000000 X-QQ-FEAT: mAJfWfDYrJNuG1tj0oMNqB69uJJcM3axWzBM6Jdr2DP28cHsrDPEeeyIzTqW9 68WA4vAf4Jv5mRJvH2QHJSqzYEEyzqRO51LrKB5xsxEmvrftj7/gsOVbiYoicedtwdhDYGa mrtx5OrFeNWX56WTB3vEi9/MY2g34Xx+C5QhhIQB5z6RM3q1Jxpr8ApaciO55TGvMBIJ8FB 9iKHOm2O4dGOZ028a1i0O0yfw/xrkm9yYtx40GTDzV80iBdn2MyQJyD65Bq8HHcimVLjgvt Gtgn+mKM/Es/+zqAeSVfUKINhVV6X9NT/LqcBIKgSiwPhE0N5O5beU1FnSc5cSc4Sp5CR7s +qnukpoAv265TKSGClS1z6kXnq7nA== X-QQ-GoodBg: 2 From: Jiawen Wu To: dev@dpdk.org Cc: Jiawen Wu Date: Tue, 3 Nov 2020 18:08:10 +0800 Message-Id: <20201103100818.311881-30-jiawenwu@trustnetic.com> X-Mailer: git-send-email 2.18.4 In-Reply-To: <20201103100818.311881-1-jiawenwu@trustnetic.com> References: <20201103100818.311881-1-jiawenwu@trustnetic.com> X-QQ-SENDSIZE: 520 Feedback-ID: bizesmtp:trustnetic.com:qybgforeign:qybgforeign6 X-QQ-Bgrelay: 1 Subject: [dpdk-dev] [PATCH 29/37] net/txgbe: support TM shaper profile add and delete X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Support traffic manager profile add and delete operations. Signed-off-by: Jiawen Wu --- drivers/net/txgbe/txgbe_tm.c | 129 +++++++++++++++++++++++++++++++++++ 1 file changed, 129 insertions(+) diff --git a/drivers/net/txgbe/txgbe_tm.c b/drivers/net/txgbe/txgbe_tm.c index 545590ba2..8adb03825 100644 --- a/drivers/net/txgbe/txgbe_tm.c +++ b/drivers/net/txgbe/txgbe_tm.c @@ -9,6 +9,13 @@ static int txgbe_tm_capabilities_get(struct rte_eth_dev *dev, struct rte_tm_capabilities *cap, struct rte_tm_error *error); +static int txgbe_shaper_profile_add(struct rte_eth_dev *dev, + uint32_t shaper_profile_id, + struct rte_tm_shaper_params *profile, + struct rte_tm_error *error); +static int txgbe_shaper_profile_del(struct rte_eth_dev *dev, + uint32_t shaper_profile_id, + struct rte_tm_error *error); static int txgbe_level_capabilities_get(struct rte_eth_dev *dev, uint32_t level_id, struct rte_tm_level_capabilities *cap, @@ -20,6 +27,8 @@ static int txgbe_node_capabilities_get(struct rte_eth_dev *dev, const struct rte_tm_ops txgbe_tm_ops = { .capabilities_get = txgbe_tm_capabilities_get, + .shaper_profile_add = txgbe_shaper_profile_add, + .shaper_profile_delete = txgbe_shaper_profile_del, .level_capabilities_get = txgbe_level_capabilities_get, .node_capabilities_get = txgbe_node_capabilities_get, }; @@ -174,6 +183,126 @@ txgbe_tm_capabilities_get(struct rte_eth_dev *dev, return 0; } +static inline struct txgbe_tm_shaper_profile * +txgbe_shaper_profile_search(struct rte_eth_dev *dev, + uint32_t shaper_profile_id) +{ + struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev); + struct txgbe_shaper_profile_list *shaper_profile_list = + &tm_conf->shaper_profile_list; + struct txgbe_tm_shaper_profile *shaper_profile; + + TAILQ_FOREACH(shaper_profile, shaper_profile_list, node) { + if (shaper_profile_id == shaper_profile->shaper_profile_id) + return shaper_profile; + } + + return NULL; +} + +static int +txgbe_shaper_profile_param_check(struct rte_tm_shaper_params *profile, + struct rte_tm_error *error) +{ + /* min rate not supported */ + if (profile->committed.rate) { + error->type = 
RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE; + error->message = "committed rate not supported"; + return -EINVAL; + } + /* min bucket size not supported */ + if (profile->committed.size) { + error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE; + error->message = "committed bucket size not supported"; + return -EINVAL; + } + /* max bucket size not supported */ + if (profile->peak.size) { + error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE; + error->message = "peak bucket size not supported"; + return -EINVAL; + } + /* length adjustment not supported */ + if (profile->pkt_length_adjust) { + error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN; + error->message = "packet length adjustment not supported"; + return -EINVAL; + } + + return 0; +} + +static int +txgbe_shaper_profile_add(struct rte_eth_dev *dev, + uint32_t shaper_profile_id, + struct rte_tm_shaper_params *profile, + struct rte_tm_error *error) +{ + struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev); + struct txgbe_tm_shaper_profile *shaper_profile; + int ret; + + if (!profile || !error) + return -EINVAL; + + ret = txgbe_shaper_profile_param_check(profile, error); + if (ret) + return ret; + + shaper_profile = txgbe_shaper_profile_search(dev, shaper_profile_id); + + if (shaper_profile) { + error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID; + error->message = "profile ID exist"; + return -EINVAL; + } + + shaper_profile = rte_zmalloc("txgbe_tm_shaper_profile", + sizeof(struct txgbe_tm_shaper_profile), + 0); + if (!shaper_profile) + return -ENOMEM; + shaper_profile->shaper_profile_id = shaper_profile_id; + rte_memcpy(&shaper_profile->profile, profile, + sizeof(struct rte_tm_shaper_params)); + TAILQ_INSERT_TAIL(&tm_conf->shaper_profile_list, + shaper_profile, node); + + return 0; +} + +static int +txgbe_shaper_profile_del(struct rte_eth_dev *dev, + uint32_t shaper_profile_id, + struct rte_tm_error *error) +{ + struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev); + struct txgbe_tm_shaper_profile *shaper_profile; + + if (!error) + return -EINVAL; + + shaper_profile = txgbe_shaper_profile_search(dev, shaper_profile_id); + + if (!shaper_profile) { + error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID; + error->message = "profile ID not exist"; + return -EINVAL; + } + + /* don't delete a profile if it's used by one or several nodes */ + if (shaper_profile->reference_count) { + error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE; + error->message = "profile in use"; + return -EINVAL; + } + + TAILQ_REMOVE(&tm_conf->shaper_profile_list, shaper_profile, node); + rte_free(shaper_profile); + + return 0; +} + static inline struct txgbe_tm_node * txgbe_tm_node_search(struct rte_eth_dev *dev, uint32_t node_id, enum txgbe_tm_node_type *node_type) From patchwork Tue Nov 3 10:08:11 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiawen Wu X-Patchwork-Id: 83551 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 5BF74A0521; Tue, 3 Nov 2020 11:17:58 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 76C16CAA2; Tue, 3 Nov 2020 11:07:51 +0100 (CET) Received: from smtpproxy21.qq.com (smtpbg702.qq.com [203.205.195.102]) by dpdk.org (Postfix) with ESMTP id D6356CA08 for ; Tue, 3 Nov 2020 11:07:32 +0100 (CET) X-QQ-mid: 
bizesmtp26t1604398048tyrmmd4w Received: from localhost.localdomain.com (unknown [183.129.236.74]) by esmtp10.qq.com (ESMTP) with id ; Tue, 03 Nov 2020 18:07:28 +0800 (CST) X-QQ-SSF: 01400000002000C0C000B00A0000000 X-QQ-FEAT: 0W2rzDh2BKTj3iaaVEBYsWgghgkcrZ9zjvIU5zmF1VKbqX5Ds5XgcHrzdVKb7 nw+NlZQoiKHPTdrNjOoHTlTSSBzwlgKMkvVlYUO6+/yEcBfZ26RCOxPa0xm9rjgwRkuoMRj /+APrtjkCxi3zai8VCvvqwTr2ysHNKVVSNoz56iQ4BwgCE/AhRj4+D3OWWQnteuGWoJx/O0 hcUwNuoar8OaHBL/+oKHnOMXzNsrlUu5KVEjpN60Fc45L6CJUKusPHBgt3uCYIY6vv+4bvz XAz+It27g2IVAF72vqrN6sIhnnrlms/oUcJm7fIolmiFTIcuFCgrv/0q/K0dfnVEk/QVf3W d572629MUCaZj5Q8pk= X-QQ-GoodBg: 2 From: Jiawen Wu To: dev@dpdk.org Cc: Jiawen Wu Date: Tue, 3 Nov 2020 18:08:11 +0800 Message-Id: <20201103100818.311881-31-jiawenwu@trustnetic.com> X-Mailer: git-send-email 2.18.4 In-Reply-To: <20201103100818.311881-1-jiawenwu@trustnetic.com> References: <20201103100818.311881-1-jiawenwu@trustnetic.com> X-QQ-SENDSIZE: 520 Feedback-ID: bizesmtp:trustnetic.com:qybgforeign:qybgforeign6 X-QQ-Bgrelay: 1 Subject: [dpdk-dev] [PATCH 30/37] net/txgbe: support TM node add and delete X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Support traffic manager node add and delete operations. Signed-off-by: Jiawen Wu --- drivers/net/txgbe/txgbe_ethdev.h | 1 + drivers/net/txgbe/txgbe_tm.c | 488 +++++++++++++++++++++++++++++++ 2 files changed, 489 insertions(+) diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h index cf377809e..1d90c6a06 100644 --- a/drivers/net/txgbe/txgbe_ethdev.h +++ b/drivers/net/txgbe/txgbe_ethdev.h @@ -14,6 +14,7 @@ #include #include #include +#include #include /* need update link, bit flag */ diff --git a/drivers/net/txgbe/txgbe_tm.c b/drivers/net/txgbe/txgbe_tm.c index 8adb03825..6dd593e54 100644 --- a/drivers/net/txgbe/txgbe_tm.c +++ b/drivers/net/txgbe/txgbe_tm.c @@ -16,6 +16,15 @@ static int txgbe_shaper_profile_add(struct rte_eth_dev *dev, static int txgbe_shaper_profile_del(struct rte_eth_dev *dev, uint32_t shaper_profile_id, struct rte_tm_error *error); +static int txgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id, + uint32_t parent_node_id, uint32_t priority, + uint32_t weight, uint32_t level_id, + struct rte_tm_node_params *params, + struct rte_tm_error *error); +static int txgbe_node_delete(struct rte_eth_dev *dev, uint32_t node_id, + struct rte_tm_error *error); +static int txgbe_node_type_get(struct rte_eth_dev *dev, uint32_t node_id, + int *is_leaf, struct rte_tm_error *error); static int txgbe_level_capabilities_get(struct rte_eth_dev *dev, uint32_t level_id, struct rte_tm_level_capabilities *cap, @@ -29,6 +38,9 @@ const struct rte_tm_ops txgbe_tm_ops = { .capabilities_get = txgbe_tm_capabilities_get, .shaper_profile_add = txgbe_shaper_profile_add, .shaper_profile_delete = txgbe_shaper_profile_del, + .node_add = txgbe_node_add, + .node_delete = txgbe_node_delete, + .node_type_get = txgbe_node_type_get, .level_capabilities_get = txgbe_level_capabilities_get, .node_capabilities_get = txgbe_node_capabilities_get, }; @@ -332,6 +344,482 @@ txgbe_tm_node_search(struct rte_eth_dev *dev, uint32_t node_id, return NULL; } +static void +txgbe_queue_base_nb_get(struct rte_eth_dev *dev, uint16_t tc_node_no, + uint16_t *base, uint16_t *nb) +{ + uint8_t nb_tcs = txgbe_tc_nb_get(dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + uint16_t vf_num = 
pci_dev->max_vfs; + + *base = 0; + *nb = 0; + + /* VT on */ + if (vf_num) { + /* no DCB */ + if (nb_tcs == 1) { + if (vf_num >= ETH_32_POOLS) { + *nb = 2; + *base = vf_num * 2; + } else if (vf_num >= ETH_16_POOLS) { + *nb = 4; + *base = vf_num * 4; + } else { + *nb = 8; + *base = vf_num * 8; + } + } else { + /* DCB */ + *nb = 1; + *base = vf_num * nb_tcs + tc_node_no; + } + } else { + /* VT off */ + if (nb_tcs == ETH_8_TCS) { + switch (tc_node_no) { + case 0: + *base = 0; + *nb = 32; + break; + case 1: + *base = 32; + *nb = 32; + break; + case 2: + *base = 64; + *nb = 16; + break; + case 3: + *base = 80; + *nb = 16; + break; + case 4: + *base = 96; + *nb = 8; + break; + case 5: + *base = 104; + *nb = 8; + break; + case 6: + *base = 112; + *nb = 8; + break; + case 7: + *base = 120; + *nb = 8; + break; + default: + return; + } + } else { + switch (tc_node_no) { + /** + * If no VF and no DCB, only 64 queues can be used. + * This case also be covered by this "case 0". + */ + case 0: + *base = 0; + *nb = 64; + break; + case 1: + *base = 64; + *nb = 32; + break; + case 2: + *base = 96; + *nb = 16; + break; + case 3: + *base = 112; + *nb = 16; + break; + default: + return; + } + } + } +} + +static int +txgbe_node_param_check(struct rte_eth_dev *dev, uint32_t node_id, + uint32_t priority, uint32_t weight, + struct rte_tm_node_params *params, + struct rte_tm_error *error) +{ + if (node_id == RTE_TM_NODE_ID_NULL) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "invalid node id"; + return -EINVAL; + } + + if (priority) { + error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY; + error->message = "priority should be 0"; + return -EINVAL; + } + + if (weight != 1) { + error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT; + error->message = "weight must be 1"; + return -EINVAL; + } + + /* not support shared shaper */ + if (params->shared_shaper_id) { + error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID; + error->message = "shared shaper not supported"; + return -EINVAL; + } + if (params->n_shared_shapers) { + error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS; + error->message = "shared shaper not supported"; + return -EINVAL; + } + + /* for non-leaf node */ + if (node_id >= dev->data->nb_tx_queues) { + /* check the unsupported parameters */ + if (params->nonleaf.wfq_weight_mode) { + error->type = + RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE; + error->message = "WFQ not supported"; + return -EINVAL; + } + if (params->nonleaf.n_sp_priorities != 1) { + error->type = + RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES; + error->message = "SP priority not supported"; + return -EINVAL; + } else if (params->nonleaf.wfq_weight_mode && + !(*params->nonleaf.wfq_weight_mode)) { + error->type = + RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE; + error->message = "WFP should be byte mode"; + return -EINVAL; + } + + return 0; + } + + /* for leaf node */ + /* check the unsupported parameters */ + if (params->leaf.cman) { + error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN; + error->message = "Congestion management not supported"; + return -EINVAL; + } + if (params->leaf.wred.wred_profile_id != + RTE_TM_WRED_PROFILE_ID_NONE) { + error->type = + RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID; + error->message = "WRED not supported"; + return -EINVAL; + } + if (params->leaf.wred.shared_wred_context_id) { + error->type = + RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID; + error->message = "WRED not supported"; + return -EINVAL; + } + if (params->leaf.wred.n_shared_wred_contexts) { + error->type = + 
RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS; + error->message = "WRED not supported"; + return -EINVAL; + } + + return 0; +} + +/** + * Now the TC and queue configuration is controlled by DCB. + * We need check if the node configuration follows the DCB configuration. + * In the future, we may use TM to cover DCB. + */ +static int +txgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id, + uint32_t parent_node_id, uint32_t priority, + uint32_t weight, uint32_t level_id, + struct rte_tm_node_params *params, + struct rte_tm_error *error) +{ + struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev); + enum txgbe_tm_node_type node_type = TXGBE_TM_NODE_TYPE_MAX; + enum txgbe_tm_node_type parent_node_type = TXGBE_TM_NODE_TYPE_MAX; + struct txgbe_tm_shaper_profile *shaper_profile = NULL; + struct txgbe_tm_node *tm_node; + struct txgbe_tm_node *parent_node; + uint8_t nb_tcs; + uint16_t q_base = 0; + uint16_t q_nb = 0; + int ret; + + if (!params || !error) + return -EINVAL; + + /* if already committed */ + if (tm_conf->committed) { + error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; + error->message = "already committed"; + return -EINVAL; + } + + ret = txgbe_node_param_check(dev, node_id, priority, weight, + params, error); + if (ret) + return ret; + + /* check if the node ID is already used */ + if (txgbe_tm_node_search(dev, node_id, &node_type)) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "node id already used"; + return -EINVAL; + } + + /* check the shaper profile id */ + if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) { + shaper_profile = txgbe_shaper_profile_search(dev, + params->shaper_profile_id); + if (!shaper_profile) { + error->type = + RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID; + error->message = "shaper profile not exist"; + return -EINVAL; + } + } + + /* root node if not have a parent */ + if (parent_node_id == RTE_TM_NODE_ID_NULL) { + /* check level */ + if (level_id != RTE_TM_NODE_LEVEL_ID_ANY && + level_id > TXGBE_TM_NODE_TYPE_PORT) { + error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS; + error->message = "Wrong level"; + return -EINVAL; + } + + /* obviously no more than one root */ + if (tm_conf->root) { + error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID; + error->message = "already have a root"; + return -EINVAL; + } + + /* add the root node */ + tm_node = rte_zmalloc("txgbe_tm_node", + sizeof(struct txgbe_tm_node), + 0); + if (!tm_node) + return -ENOMEM; + tm_node->id = node_id; + tm_node->priority = priority; + tm_node->weight = weight; + tm_node->reference_count = 0; + tm_node->no = 0; + tm_node->parent = NULL; + tm_node->shaper_profile = shaper_profile; + rte_memcpy(&tm_node->params, params, + sizeof(struct rte_tm_node_params)); + tm_conf->root = tm_node; + + /* increase the reference counter of the shaper profile */ + if (shaper_profile) + shaper_profile->reference_count++; + + return 0; + } + + /* TC or queue node */ + /* check the parent node */ + parent_node = txgbe_tm_node_search(dev, parent_node_id, + &parent_node_type); + if (!parent_node) { + error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID; + error->message = "parent not exist"; + return -EINVAL; + } + if (parent_node_type != TXGBE_TM_NODE_TYPE_PORT && + parent_node_type != TXGBE_TM_NODE_TYPE_TC) { + error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID; + error->message = "parent is not port or TC"; + return -EINVAL; + } + /* check level */ + if (level_id != RTE_TM_NODE_LEVEL_ID_ANY && + level_id != parent_node_type + 1) { + error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS; + 
error->message = "Wrong level"; + return -EINVAL; + } + + /* check the node number */ + if (parent_node_type == TXGBE_TM_NODE_TYPE_PORT) { + /* check TC number */ + nb_tcs = txgbe_tc_nb_get(dev); + if (tm_conf->nb_tc_node >= nb_tcs) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "too many TCs"; + return -EINVAL; + } + } else { + /* check queue number */ + if (tm_conf->nb_queue_node >= dev->data->nb_tx_queues) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "too many queues"; + return -EINVAL; + } + + txgbe_queue_base_nb_get(dev, parent_node->no, &q_base, &q_nb); + if (parent_node->reference_count >= q_nb) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "too many queues than TC supported"; + return -EINVAL; + } + + /** + * check the node id. + * For queue, the node id means queue id. + */ + if (node_id >= dev->data->nb_tx_queues) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "too large queue id"; + return -EINVAL; + } + } + + /* add the TC or queue node */ + tm_node = rte_zmalloc("txgbe_tm_node", + sizeof(struct txgbe_tm_node), + 0); + if (!tm_node) + return -ENOMEM; + tm_node->id = node_id; + tm_node->priority = priority; + tm_node->weight = weight; + tm_node->reference_count = 0; + tm_node->parent = parent_node; + tm_node->shaper_profile = shaper_profile; + rte_memcpy(&tm_node->params, params, + sizeof(struct rte_tm_node_params)); + if (parent_node_type == TXGBE_TM_NODE_TYPE_PORT) { + tm_node->no = parent_node->reference_count; + TAILQ_INSERT_TAIL(&tm_conf->tc_list, + tm_node, node); + tm_conf->nb_tc_node++; + } else { + tm_node->no = q_base + parent_node->reference_count; + TAILQ_INSERT_TAIL(&tm_conf->queue_list, + tm_node, node); + tm_conf->nb_queue_node++; + } + tm_node->parent->reference_count++; + + /* increase the reference counter of the shaper profile */ + if (shaper_profile) + shaper_profile->reference_count++; + + return 0; +} + +static int +txgbe_node_delete(struct rte_eth_dev *dev, uint32_t node_id, + struct rte_tm_error *error) +{ + struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev); + enum txgbe_tm_node_type node_type = TXGBE_TM_NODE_TYPE_MAX; + struct txgbe_tm_node *tm_node; + + if (!error) + return -EINVAL; + + /* if already committed */ + if (tm_conf->committed) { + error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; + error->message = "already committed"; + return -EINVAL; + } + + if (node_id == RTE_TM_NODE_ID_NULL) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "invalid node id"; + return -EINVAL; + } + + /* check the if the node id exists */ + tm_node = txgbe_tm_node_search(dev, node_id, &node_type); + if (!tm_node) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "no such node"; + return -EINVAL; + } + + /* the node should have no child */ + if (tm_node->reference_count) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = + "cannot delete a node which has children"; + return -EINVAL; + } + + /* root node */ + if (node_type == TXGBE_TM_NODE_TYPE_PORT) { + if (tm_node->shaper_profile) + tm_node->shaper_profile->reference_count--; + rte_free(tm_node); + tm_conf->root = NULL; + return 0; + } + + /* TC or queue node */ + if (tm_node->shaper_profile) + tm_node->shaper_profile->reference_count--; + tm_node->parent->reference_count--; + if (node_type == TXGBE_TM_NODE_TYPE_TC) { + TAILQ_REMOVE(&tm_conf->tc_list, tm_node, node); + tm_conf->nb_tc_node--; + } else { + TAILQ_REMOVE(&tm_conf->queue_list, tm_node, node); + tm_conf->nb_queue_node--; + } + rte_free(tm_node); + + 
return 0; +} + +static int +txgbe_node_type_get(struct rte_eth_dev *dev, uint32_t node_id, + int *is_leaf, struct rte_tm_error *error) +{ + enum txgbe_tm_node_type node_type = TXGBE_TM_NODE_TYPE_MAX; + struct txgbe_tm_node *tm_node; + + if (!is_leaf || !error) + return -EINVAL; + + if (node_id == RTE_TM_NODE_ID_NULL) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "invalid node id"; + return -EINVAL; + } + + /* check if the node id exists */ + tm_node = txgbe_tm_node_search(dev, node_id, &node_type); + if (!tm_node) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "no such node"; + return -EINVAL; + } + + if (node_type == TXGBE_TM_NODE_TYPE_QUEUE) + *is_leaf = true; + else + *is_leaf = false; + + return 0; +} + static int txgbe_level_capabilities_get(struct rte_eth_dev *dev, uint32_t level_id, From patchwork Tue Nov 3 10:08:12 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiawen Wu X-Patchwork-Id: 83552 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 02F4FA0521; Tue, 3 Nov 2020 11:18:20 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 49B90CAB7; Tue, 3 Nov 2020 11:07:53 +0100 (CET) Received: from smtpbgsg2.qq.com (smtpbgsg2.qq.com [54.254.200.128]) by dpdk.org (Postfix) with ESMTP id 91A40C85E for ; Tue, 3 Nov 2020 11:07:35 +0100 (CET) X-QQ-mid: bizesmtp26t1604398050tq7917su Received: from localhost.localdomain.com (unknown [183.129.236.74]) by esmtp10.qq.com (ESMTP) with id ; Tue, 03 Nov 2020 18:07:29 +0800 (CST) X-QQ-SSF: 01400000002000C0C000B00A0000000 X-QQ-FEAT: mJG+EqVdzGC7KHvFdBAnfQee8jkBMXomgzIIMJIDtD2WKrLB36x2lIyVSRUz2 E+aVh4Jc3/jMMtc1NudNjx+X9y/C1hxraDD2DVU2T3irkcMHJoA16mMRplYvsssxAZsN8f3 iXa37TTwb9xaPtHTYqJmwpYO9fKJ92YtwIH9T8fBQEPSTsDnJ9ESh4k6ZSZ6tWgI/KF/Bi2 vJ5HrQSSeg3qHHDogvP0ZxI18T2B5+0Q3Bfekqr/XkMYdfqrDsvCOCKm7qnEbCECGhd7LS5 +ts/aeNQkqXiLhHs198S7NOCU8MSXlBcd58awv3A1/+CNuKJ0D09bjjwTfCNKEaqfu4KOa/ zTRlS2ipfNnG/Op5TAFrnCl1yte7Q== X-QQ-GoodBg: 2 From: Jiawen Wu To: dev@dpdk.org Cc: Jiawen Wu Date: Tue, 3 Nov 2020 18:08:12 +0800 Message-Id: <20201103100818.311881-32-jiawenwu@trustnetic.com> X-Mailer: git-send-email 2.18.4 In-Reply-To: <20201103100818.311881-1-jiawenwu@trustnetic.com> References: <20201103100818.311881-1-jiawenwu@trustnetic.com> X-QQ-SENDSIZE: 520 Feedback-ID: bizesmtp:trustnetic.com:qybgforeign:qybgforeign5 X-QQ-Bgrelay: 1 Subject: [dpdk-dev] [PATCH 31/37] net/txgbe: add TM hierarchy commit X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Add traffic manager hierarchy commit. 
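For context (not part of this patch; node ids, level numbers and the helper name are illustrative), a minimal application-side sketch of how the committed hierarchy would be driven through the generic rte_tm API. Only the queue level may carry a peak rate; the PMD converts it from bytes/s to Mbps before programming the queue rate limit.

#include <rte_tm.h>

/* illustrative helper: one port node, one TC, one rate-limited queue */
static int
app_setup_tm(uint16_t port_id)
{
	struct rte_tm_error error;
	struct rte_tm_shaper_params sp = { 0 };
	struct rte_tm_node_params np = { 0 };
	int ret;

	/* 100 Mbit/s expressed in bytes per second, as rte_tm expects */
	sp.peak.rate = 100 * 1000 * 1000 / 8;
	ret = rte_tm_shaper_profile_add(port_id, 1, &sp, &error);
	if (ret)
		return ret;

	/* port (root) and TC nodes carry no shaper profile */
	np.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE;
	ret = rte_tm_node_add(port_id, 1000, RTE_TM_NODE_ID_NULL,
			      0, 1, 0 /* port level */, &np, &error);
	if (ret)
		return ret;
	ret = rte_tm_node_add(port_id, 900, 1000,
			      0, 1, 1 /* TC level */, &np, &error);
	if (ret)
		return ret;

	/* queue 0 as a leaf node with the shaper profile attached */
	np.shaper_profile_id = 1;
	ret = rte_tm_node_add(port_id, 0, 900,
			      0, 1, 2 /* queue level */, &np, &error);
	if (ret)
		return ret;

	/* apply the configuration; ask the PMD to clear it on failure */
	return rte_tm_hierarchy_commit(port_id, 1, &error);
}

As the commit handler added below shows, port- and TC-level peak rates are rejected, and a failed commit with clear_on_fail set resets the whole TM configuration.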
Signed-off-by: Jiawen Wu --- drivers/net/txgbe/txgbe_tm.c | 70 ++++++++++++++++++++++++++++++++++++ 1 file changed, 70 insertions(+) diff --git a/drivers/net/txgbe/txgbe_tm.c b/drivers/net/txgbe/txgbe_tm.c index 6dd593e54..b8edd78bf 100644 --- a/drivers/net/txgbe/txgbe_tm.c +++ b/drivers/net/txgbe/txgbe_tm.c @@ -33,6 +33,9 @@ static int txgbe_node_capabilities_get(struct rte_eth_dev *dev, uint32_t node_id, struct rte_tm_node_capabilities *cap, struct rte_tm_error *error); +static int txgbe_hierarchy_commit(struct rte_eth_dev *dev, + int clear_on_fail, + struct rte_tm_error *error); const struct rte_tm_ops txgbe_tm_ops = { .capabilities_get = txgbe_tm_capabilities_get, @@ -43,6 +46,7 @@ const struct rte_tm_ops txgbe_tm_ops = { .node_type_get = txgbe_node_type_get, .level_capabilities_get = txgbe_level_capabilities_get, .node_capabilities_get = txgbe_node_capabilities_get, + .hierarchy_commit = txgbe_hierarchy_commit, }; int @@ -950,3 +954,69 @@ txgbe_node_capabilities_get(struct rte_eth_dev *dev, return 0; } +static int +txgbe_hierarchy_commit(struct rte_eth_dev *dev, + int clear_on_fail, + struct rte_tm_error *error) +{ + struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev); + struct txgbe_tm_node *tm_node; + uint64_t bw; + int ret; + + if (!error) + return -EINVAL; + + /* check the setting */ + if (!tm_conf->root) + goto done; + + /* not support port max bandwidth yet */ + if (tm_conf->root->shaper_profile && + tm_conf->root->shaper_profile->profile.peak.rate) { + error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE; + error->message = "no port max bandwidth"; + goto fail_clear; + } + + /* HW not support TC max bandwidth */ + TAILQ_FOREACH(tm_node, &tm_conf->tc_list, node) { + if (tm_node->shaper_profile && + tm_node->shaper_profile->profile.peak.rate) { + error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE; + error->message = "no TC max bandwidth"; + goto fail_clear; + } + } + + /* queue max bandwidth */ + TAILQ_FOREACH(tm_node, &tm_conf->queue_list, node) { + if (tm_node->shaper_profile) + bw = tm_node->shaper_profile->profile.peak.rate; + else + bw = 0; + if (bw) { + /* interpret Bps to Mbps */ + bw = bw * 8 / 1000 / 1000; + ret = txgbe_set_queue_rate_limit(dev, tm_node->no, bw); + if (ret) { + error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE; + error->message = + "failed to set queue max bandwidth"; + goto fail_clear; + } + } + } + +done: + tm_conf->committed = true; + return 0; + +fail_clear: + /* clear all the traffic manager configuration */ + if (clear_on_fail) { + txgbe_tm_conf_uninit(dev); + txgbe_tm_conf_init(dev); + } + return -EINVAL; +} From patchwork Tue Nov 3 10:08:13 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiawen Wu X-Patchwork-Id: 83553 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id AE402A0521; Tue, 3 Nov 2020 11:18:38 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id ADCA2CAC0; Tue, 3 Nov 2020 11:07:54 +0100 (CET) Received: from smtpbgeu1.qq.com (smtpbgeu1.qq.com [52.59.177.22]) by dpdk.org (Postfix) with ESMTP id 65E2ACA1E for ; Tue, 3 Nov 2020 11:07:35 +0100 (CET) X-QQ-mid: bizesmtp26t1604398051t7nshm20 Received: from localhost.localdomain.com (unknown [183.129.236.74]) by esmtp10.qq.com (ESMTP) with id ; Tue, 03 Nov 2020 18:07:30 +0800 (CST) X-QQ-SSF: 
01400000002000C0C000B00A0000000 X-QQ-FEAT: ql6Zc2TW8JysJ8IDWRpPbrhmj9SL+d1qKfiMoUpMtAohdlPv4GLvBU5MQVcWq NJvCrZO75d9NlfwUYnI92hHxFM/VjzDRwa8lWqIpGXL3nC4JXaK0UcIfnEV9F84S/DYQYEQ y42DZr97oArueVgW6EAsuW79sLalQUgwbXCdK5/BjWCrasSWXXNCx/BvibUaUBwrOBfAUMm BR1uoe7V6Lx5PCNivGeGQXOr8+9y5AjUkcv48RCrCiDR1zrUO24Ryb8nwsJKTA7stpo2DOQ /fJsd51bTcnVQvvcAIe5ovO9Md6pLSVIfWM+GR9LjPrNYbQfPBUyKxVbRr303KLHnUsU4Dk X/stnO7mIN0ymK8DXbDX9qHVGw4IA== X-QQ-GoodBg: 2 From: Jiawen Wu To: dev@dpdk.org Cc: Jiawen Wu Date: Tue, 3 Nov 2020 18:08:13 +0800 Message-Id: <20201103100818.311881-33-jiawenwu@trustnetic.com> X-Mailer: git-send-email 2.18.4 In-Reply-To: <20201103100818.311881-1-jiawenwu@trustnetic.com> References: <20201103100818.311881-1-jiawenwu@trustnetic.com> X-QQ-SENDSIZE: 520 Feedback-ID: bizesmtp:trustnetic.com:qybgforeign:qybgforeign6 X-QQ-Bgrelay: 1 Subject: [dpdk-dev] [PATCH 32/37] net/txgbe: add macsec setting X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Add macsec register enable and setting reset operations. Add macsec offload suuport. Signed-off-by: Jiawen Wu --- doc/guides/nics/features/txgbe.ini | 1 + drivers/net/txgbe/txgbe_ethdev.c | 87 ++++++++++++++++++++++++++++++ drivers/net/txgbe/txgbe_ethdev.h | 17 ++++++ drivers/net/txgbe/txgbe_rxtx.c | 3 ++ 4 files changed, 108 insertions(+) diff --git a/doc/guides/nics/features/txgbe.ini b/doc/guides/nics/features/txgbe.ini index 9db2ccde0..d6705f7aa 100644 --- a/doc/guides/nics/features/txgbe.ini +++ b/doc/guides/nics/features/txgbe.ini @@ -34,6 +34,7 @@ VLAN offload = P QinQ offload = P L3 checksum offload = P L4 checksum offload = P +MACsec offload = P Inner L3 checksum = P Inner L4 checksum = P Packet type parsing = Y diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c index 80ea1038c..c92a4aa5f 100644 --- a/drivers/net/txgbe/txgbe_ethdev.c +++ b/drivers/net/txgbe/txgbe_ethdev.c @@ -487,6 +487,8 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) PMD_INIT_FUNC_TRACE(); + txgbe_dev_macsec_setting_reset(eth_dev); + eth_dev->dev_ops = &txgbe_eth_dev_ops; eth_dev->rx_queue_count = txgbe_dev_rx_queue_count; eth_dev->rx_descriptor_status = txgbe_dev_rx_descriptor_status; @@ -1549,6 +1551,8 @@ txgbe_dev_start(struct rte_eth_dev *dev) uint16_t vf, idx; uint32_t *link_speeds; struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev); + struct txgbe_macsec_setting *macsec_setting = + TXGBE_DEV_MACSEC_SETTING(dev); PMD_INIT_FUNC_TRACE(); @@ -1763,6 +1767,10 @@ txgbe_dev_start(struct rte_eth_dev *dev) */ txgbe_dev_link_update(dev, 0); + /* setup the macsec ctrl register */ + if (macsec_setting->offload_en) + txgbe_dev_macsec_register_enable(dev, macsec_setting); + wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_ORD_MASK); txgbe_read_stats_registers(hw, hw_stats); @@ -5326,6 +5334,85 @@ txgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev) return 0; } +void +txgbe_dev_macsec_setting_reset(struct rte_eth_dev *dev) +{ + struct txgbe_macsec_setting *macsec = TXGBE_DEV_MACSEC_SETTING(dev); + + macsec->offload_en = 0; + macsec->encrypt_en = 0; + macsec->replayprotect_en = 0; +} + +void +txgbe_dev_macsec_register_enable(struct rte_eth_dev *dev, + struct txgbe_macsec_setting *macsec_setting) +{ + struct txgbe_hw *hw = TXGBE_DEV_HW(dev); + uint32_t ctrl; + uint8_t en = macsec_setting->encrypt_en; + uint8_t rp = 
macsec_setting->replayprotect_en; + + /** + * Workaround: + * As no txgbe_disable_sec_rx_path equivalent is + * implemented for tx in the base code, and we are + * not allowed to modify the base code in DPDK, so + * just call the hand-written one directly for now. + * The hardware support has been checked by + * txgbe_disable_sec_rx_path(). + */ + txgbe_disable_sec_tx_path(hw); + + /* Enable Ethernet CRC (required by MACsec offload) */ + ctrl = rd32(hw, TXGBE_SECRXCTL); + ctrl |= TXGBE_SECRXCTL_CRCSTRIP; + wr32(hw, TXGBE_SECRXCTL, ctrl); + + /* Enable the TX and RX crypto engines */ + ctrl = rd32(hw, TXGBE_SECTXCTL); + ctrl &= ~TXGBE_SECTXCTL_XDSA; + wr32(hw, TXGBE_SECTXCTL, ctrl); + + ctrl = rd32(hw, TXGBE_SECRXCTL); + ctrl &= ~TXGBE_SECRXCTL_XDSA; + wr32(hw, TXGBE_SECRXCTL, ctrl); + + ctrl = rd32(hw, TXGBE_SECTXIFG); + ctrl &= ~TXGBE_SECTXIFG_MIN_MASK; + ctrl |= TXGBE_SECTXIFG_MIN(0x3); + wr32(hw, TXGBE_SECTXIFG, ctrl); + + /* Enable SA lookup */ + ctrl = rd32(hw, TXGBE_LSECTXCTL); + ctrl &= ~TXGBE_LSECTXCTL_MODE_MASK; + ctrl |= en ? TXGBE_LSECTXCTL_MODE_AENC : TXGBE_LSECTXCTL_MODE_AUTH; + ctrl &= ~TXGBE_LSECTXCTL_PNTRH_MASK; + ctrl |= TXGBE_LSECTXCTL_PNTRH(TXGBE_MACSEC_PNTHRSH); + wr32(hw, TXGBE_LSECTXCTL, ctrl); + + ctrl = rd32(hw, TXGBE_LSECRXCTL); + ctrl &= ~TXGBE_LSECRXCTL_MODE_MASK; + ctrl |= TXGBE_LSECRXCTL_MODE_STRICT; + ctrl &= ~TXGBE_LSECRXCTL_POSTHDR; + if (rp) + ctrl |= TXGBE_LSECRXCTL_REPLAY; + else + ctrl &= ~TXGBE_LSECRXCTL_REPLAY; + wr32(hw, TXGBE_LSECRXCTL, ctrl); + + /* Start the data paths */ + txgbe_enable_sec_rx_path(hw); + /** + * Workaround: + * As no txgbe_enable_sec_rx_path equivalent is + * implemented for tx in the base code, and we are + * not allowed to modify the base code in DPDK, so + * just call the hand-written one directly for now. + */ + txgbe_enable_sec_tx_path(hw); +} + static const struct eth_dev_ops txgbe_eth_dev_ops = { .dev_configure = txgbe_dev_configure, .dev_infos_get = txgbe_dev_info_get, diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h index 1d90c6a06..2d15e1ac3 100644 --- a/drivers/net/txgbe/txgbe_ethdev.h +++ b/drivers/net/txgbe/txgbe_ethdev.h @@ -62,6 +62,8 @@ #define TXGBE_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET #define TXGBE_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET +#define TXGBE_MACSEC_PNTHRSH 0xFFFFFE00 + #define TXGBE_MAX_FDIR_FILTER_NUM (1024 * 32) #define TXGBE_MAX_L2_TN_FILTER_NUM 128 @@ -270,6 +272,12 @@ struct rte_flow { void *rule; }; +struct txgbe_macsec_setting { + uint8_t offload_en; + uint8_t encrypt_en; + uint8_t replayprotect_en; +}; + /* The configuration of bandwidth */ struct txgbe_bw_conf { uint8_t tc_num; /* Number of TCs. 
*/ @@ -343,6 +351,7 @@ struct txgbe_tm_conf { struct txgbe_adapter { struct txgbe_hw hw; struct txgbe_hw_stats stats; + struct txgbe_macsec_setting macsec_setting; struct txgbe_hw_fdir_info fdir; struct txgbe_interrupt intr; struct txgbe_stat_mappings stat_mappings; @@ -374,6 +383,9 @@ struct txgbe_adapter { #define TXGBE_DEV_STATS(dev) \ (&((struct txgbe_adapter *)(dev)->data->dev_private)->stats) +#define TXGBE_DEV_MACSEC_SETTING(dev) \ + (&((struct txgbe_adapter *)(dev)->data->dev_private)->macsec_setting) + #define TXGBE_DEV_INTR(dev) \ (&((struct txgbe_adapter *)(dev)->data->dev_private)->intr) @@ -579,6 +591,11 @@ int txgbe_action_rss_same(const struct rte_flow_action_rss *comp, int txgbe_config_rss_filter(struct rte_eth_dev *dev, struct txgbe_rte_flow_rss_conf *conf, bool add); +void txgbe_dev_macsec_register_enable(struct rte_eth_dev *dev, + struct txgbe_macsec_setting *macsec_setting); + +void txgbe_dev_macsec_setting_reset(struct rte_eth_dev *dev); + static inline int txgbe_ethertype_filter_lookup(struct txgbe_filter_info *filter_info, uint16_t ethertype) diff --git a/drivers/net/txgbe/txgbe_rxtx.c b/drivers/net/txgbe/txgbe_rxtx.c index babda3a79..f99924a3f 100644 --- a/drivers/net/txgbe/txgbe_rxtx.c +++ b/drivers/net/txgbe/txgbe_rxtx.c @@ -56,6 +56,9 @@ static const u64 TXGBE_TX_OFFLOAD_MASK = (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK | PKT_TX_TCP_SEG | PKT_TX_TUNNEL_MASK | +#ifdef RTE_LIBRTE_MACSEC + PKT_TX_MACSEC | +#endif PKT_TX_OUTER_IP_CKSUM | TXGBE_TX_IEEE1588_TMST); From patchwork Tue Nov 3 10:08:14 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiawen Wu X-Patchwork-Id: 83554 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 6CABAA0521; Tue, 3 Nov 2020 11:18:59 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 2BC84CAC8; Tue, 3 Nov 2020 11:07:56 +0100 (CET) Received: from smtpbgsg2.qq.com (smtpbgsg2.qq.com [54.254.200.128]) by dpdk.org (Postfix) with ESMTP id 3E6DDCA34 for ; Tue, 3 Nov 2020 11:07:37 +0100 (CET) X-QQ-mid: bizesmtp26t1604398052tzgbpwq8 Received: from localhost.localdomain.com (unknown [183.129.236.74]) by esmtp10.qq.com (ESMTP) with id ; Tue, 03 Nov 2020 18:07:32 +0800 (CST) X-QQ-SSF: 01400000002000C0C000B00A0000000 X-QQ-FEAT: 5p2KJntE3DErE050/BaFXCSu5InmSBchB3zAZe3bjKsNnLoLykA9fLzT7Ynyo i1aZMaNCDStsuVoQIvF03qTAdQn3e+cSOtH/xPihzniiadbKJwPn/TTEnH7bLkESG0ZPrDe ONHL0d2GepJnGpcAObrJ7hzE0mCe37qA8p2/CKw2gemEiEIGBA6y5dkJdoX2JhYKq/xtiOA waVdEJVMv0fIAmUte2QkVO/UGX2z8XDIWZRSssk1o3bZ7fbZ4TBzKDQhLlBroGfDJZhm1s5 enY1ZSaeDRgHw3SaT0mJACloP+kpzVsd7M3+d7BX/WE/oTQEm/oP25BnGHsSOp/yD3taJXU 4xeQd7lZMWkUCU2Dvrt8aT9MOSbVx/eiIP2KCnD X-QQ-GoodBg: 2 From: Jiawen Wu To: dev@dpdk.org Cc: Jiawen Wu Date: Tue, 3 Nov 2020 18:08:14 +0800 Message-Id: <20201103100818.311881-34-jiawenwu@trustnetic.com> X-Mailer: git-send-email 2.18.4 In-Reply-To: <20201103100818.311881-1-jiawenwu@trustnetic.com> References: <20201103100818.311881-1-jiawenwu@trustnetic.com> X-QQ-SENDSIZE: 520 Feedback-ID: bizesmtp:trustnetic.com:qybgforeign:qybgforeign5 X-QQ-Bgrelay: 1 Subject: [dpdk-dev] [PATCH 33/37] net/txgbe: add IPsec context creation X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , 
Errors-To: dev-bounces@dpdk.org Sender: "dev" Initialize securiry context, and add support to get security capabilities. Signed-off-by: Jiawen Wu --- doc/guides/nics/features/txgbe.ini | 1 + drivers/net/txgbe/meson.build | 3 +- drivers/net/txgbe/txgbe_ethdev.c | 13 +++ drivers/net/txgbe/txgbe_ethdev.h | 3 + drivers/net/txgbe/txgbe_ipsec.c | 181 +++++++++++++++++++++++++++++ drivers/net/txgbe/txgbe_ipsec.h | 13 +++ 6 files changed, 213 insertions(+), 1 deletion(-) create mode 100644 drivers/net/txgbe/txgbe_ipsec.c create mode 100644 drivers/net/txgbe/txgbe_ipsec.h diff --git a/doc/guides/nics/features/txgbe.ini b/doc/guides/nics/features/txgbe.ini index d6705f7aa..54daa382a 100644 --- a/doc/guides/nics/features/txgbe.ini +++ b/doc/guides/nics/features/txgbe.ini @@ -29,6 +29,7 @@ Flow control = Y Flow API = Y Rate limitation = Y Traffic mirroring = Y +Inline crypto = Y CRC offload = P VLAN offload = P QinQ offload = P diff --git a/drivers/net/txgbe/meson.build b/drivers/net/txgbe/meson.build index 352baad8b..f6a51a998 100644 --- a/drivers/net/txgbe/meson.build +++ b/drivers/net/txgbe/meson.build @@ -8,13 +8,14 @@ sources = files( 'txgbe_ethdev.c', 'txgbe_fdir.c', 'txgbe_flow.c', + 'txgbe_ipsec.c', 'txgbe_ptypes.c', 'txgbe_pf.c', 'txgbe_rxtx.c', 'txgbe_tm.c', ) -deps += ['hash'] +deps += ['hash', 'security'] includes += include_directories('base') diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c index c92a4aa5f..c0582e264 100644 --- a/drivers/net/txgbe/txgbe_ethdev.c +++ b/drivers/net/txgbe/txgbe_ethdev.c @@ -16,6 +16,9 @@ #include #include #include +#ifdef RTE_LIB_SECURITY +#include +#endif #include "txgbe_logs.h" #include "base/txgbe.h" @@ -549,6 +552,12 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) /* Unlock any pending hardware semaphore */ txgbe_swfw_lock_reset(hw); +#ifdef RTE_LIB_SECURITY + /* Initialize security_ctx only for primary process*/ + if (txgbe_ipsec_ctx_create(eth_dev)) + return -ENOMEM; +#endif + /* Initialize DCB configuration*/ memset(dcb_config, 0, sizeof(struct txgbe_dcb_config)); txgbe_dcb_init(hw, dcb_config); @@ -1971,6 +1980,10 @@ txgbe_dev_close(struct rte_eth_dev *dev) /* Remove all Traffic Manager configuration */ txgbe_tm_conf_uninit(dev); +#ifdef RTE_LIB_SECURITY + rte_free(dev->security_ctx); +#endif + return ret; } diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h index 2d15e1ac3..1e7fa1f87 100644 --- a/drivers/net/txgbe/txgbe_ethdev.h +++ b/drivers/net/txgbe/txgbe_ethdev.h @@ -9,6 +9,9 @@ #include "base/txgbe.h" #include "txgbe_ptypes.h" +#ifdef RTE_LIB_SECURITY +#include "txgbe_ipsec.h" +#endif #include #include #include diff --git a/drivers/net/txgbe/txgbe_ipsec.c b/drivers/net/txgbe/txgbe_ipsec.c new file mode 100644 index 000000000..b21bba237 --- /dev/null +++ b/drivers/net/txgbe/txgbe_ipsec.c @@ -0,0 +1,181 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2015-2020 + */ + +#include +#include +#include +#include +#include +#include + +#include "base/txgbe.h" +#include "txgbe_ethdev.h" +#include "txgbe_ipsec.h" + +static const struct rte_security_capability * +txgbe_crypto_capabilities_get(void *device __rte_unused) +{ + static const struct rte_cryptodev_capabilities + aes_gcm_gmac_crypto_capabilities[] = { + { /* AES GMAC (128-bit) */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + {.auth = { + .algo = RTE_CRYPTO_AUTH_AES_GMAC, + .block_size = 16, + .key_size = { + .min = 16, + .max = 16, + 
.increment = 0 + }, + .digest_size = { + .min = 16, + .max = 16, + .increment = 0 + }, + .iv_size = { + .min = 12, + .max = 12, + .increment = 0 + } + }, } + }, } + }, + { /* AES GCM (128-bit) */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD, + {.aead = { + .algo = RTE_CRYPTO_AEAD_AES_GCM, + .block_size = 16, + .key_size = { + .min = 16, + .max = 16, + .increment = 0 + }, + .digest_size = { + .min = 16, + .max = 16, + .increment = 0 + }, + .aad_size = { + .min = 0, + .max = 65535, + .increment = 1 + }, + .iv_size = { + .min = 12, + .max = 12, + .increment = 0 + } + }, } + }, } + }, + { + .op = RTE_CRYPTO_OP_TYPE_UNDEFINED, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED + }, } + }, + }; + + static const struct rte_security_capability + txgbe_security_capabilities[] = { + { /* IPsec Inline Crypto ESP Transport Egress */ + .action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO, + .protocol = RTE_SECURITY_PROTOCOL_IPSEC, + {.ipsec = { + .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP, + .mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT, + .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS, + .options = { 0 } + } }, + .crypto_capabilities = aes_gcm_gmac_crypto_capabilities, + .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA + }, + { /* IPsec Inline Crypto ESP Transport Ingress */ + .action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO, + .protocol = RTE_SECURITY_PROTOCOL_IPSEC, + {.ipsec = { + .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP, + .mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT, + .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS, + .options = { 0 } + } }, + .crypto_capabilities = aes_gcm_gmac_crypto_capabilities, + .ol_flags = 0 + }, + { /* IPsec Inline Crypto ESP Tunnel Egress */ + .action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO, + .protocol = RTE_SECURITY_PROTOCOL_IPSEC, + {.ipsec = { + .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP, + .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL, + .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS, + .options = { 0 } + } }, + .crypto_capabilities = aes_gcm_gmac_crypto_capabilities, + .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA + }, + { /* IPsec Inline Crypto ESP Tunnel Ingress */ + .action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO, + .protocol = RTE_SECURITY_PROTOCOL_IPSEC, + {.ipsec = { + .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP, + .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL, + .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS, + .options = { 0 } + } }, + .crypto_capabilities = aes_gcm_gmac_crypto_capabilities, + .ol_flags = 0 + }, + { + .action = RTE_SECURITY_ACTION_TYPE_NONE + } + }; + + return txgbe_security_capabilities; +} + +static struct rte_security_ops txgbe_security_ops = { + .capabilities_get = txgbe_crypto_capabilities_get +}; + +static int +txgbe_crypto_capable(struct rte_eth_dev *dev) +{ + struct txgbe_hw *hw = TXGBE_DEV_HW(dev); + uint32_t reg_i, reg, capable = 1; + /* test if rx crypto can be enabled and then write back initial value*/ + reg_i = rd32(hw, TXGBE_SECRXCTL); + wr32m(hw, TXGBE_SECRXCTL, TXGBE_SECRXCTL_ODSA, 0); + reg = rd32m(hw, TXGBE_SECRXCTL, TXGBE_SECRXCTL_ODSA); + if (reg != 0) + capable = 0; + wr32(hw, TXGBE_SECRXCTL, reg_i); + return capable; +} + +int +txgbe_ipsec_ctx_create(struct rte_eth_dev *dev) +{ + struct rte_security_ctx *ctx = NULL; + + if (txgbe_crypto_capable(dev)) { + ctx = rte_malloc("rte_security_instances_ops", + sizeof(struct rte_security_ctx), 0); + if (ctx) { + ctx->device = (void *)dev; + ctx->ops = &txgbe_security_ops; + ctx->sess_cnt = 0; + dev->security_ctx = ctx; + } else { + return 
-ENOMEM; + } + } + if (rte_security_dynfield_register() < 0) + return -rte_errno; + return 0; +} diff --git a/drivers/net/txgbe/txgbe_ipsec.h b/drivers/net/txgbe/txgbe_ipsec.h new file mode 100644 index 000000000..f58ebab3d --- /dev/null +++ b/drivers/net/txgbe/txgbe_ipsec.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2015-2020 + */ + +#ifndef TXGBE_IPSEC_H_ +#define TXGBE_IPSEC_H_ + +#include +#include + +int txgbe_ipsec_ctx_create(struct rte_eth_dev *dev); + +#endif /*TXGBE_IPSEC_H_*/ From patchwork Tue Nov 3 10:08:15 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiawen Wu X-Patchwork-Id: 83555 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 5CD9EA0521; Tue, 3 Nov 2020 11:19:19 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 8E9FEC904; Tue, 3 Nov 2020 11:07:57 +0100 (CET) Received: from smtpbgbr2.qq.com (smtpbgbr2.qq.com [54.207.22.56]) by dpdk.org (Postfix) with ESMTP id 3623ACA34 for ; Tue, 3 Nov 2020 11:07:39 +0100 (CET) X-QQ-mid: bizesmtp26t1604398053tyna16w0 Received: from localhost.localdomain.com (unknown [183.129.236.74]) by esmtp10.qq.com (ESMTP) with id ; Tue, 03 Nov 2020 18:07:33 +0800 (CST) X-QQ-SSF: 01400000002000C0C000B00A0000000 X-QQ-FEAT: 4ufqJHNJpU2Opvxa8pbaZSXMwmTw95osJbRX/wUlv4mEo/GxPfFGp74pI5W4g 20RvJ0NUo08etGrfrnPavkC0XqmmYe58NNYSR6oBjLhz66fE5w53rlO7F399KOtBr2e+0wa PEFfsk7ZMco6XntRmo2Qx/A/446WKDOx//9FltTb9Lhg3g47G5kINOF7tuHFCabXzWv5xyB UlHYKJOYlkhKxR6TinAfTJP7Wk3VH1o7V1PiUc3JOUE4JVnZpt46RRZQ+MJ/i7nUQz7Dbxd YVUYUiSurQJbhRHvcdS9qQtq24KBMepuBFzjc9nBjHq4cVTQNhUgtP+5Scp1x0BRN0pn0RW an7pMK+pWV8puQWad/+EWcEPSHrmQ== X-QQ-GoodBg: 2 From: Jiawen Wu To: dev@dpdk.org Cc: Jiawen Wu Date: Tue, 3 Nov 2020 18:08:15 +0800 Message-Id: <20201103100818.311881-35-jiawenwu@trustnetic.com> X-Mailer: git-send-email 2.18.4 In-Reply-To: <20201103100818.311881-1-jiawenwu@trustnetic.com> References: <20201103100818.311881-1-jiawenwu@trustnetic.com> X-QQ-SENDSIZE: 520 Feedback-ID: bizesmtp:trustnetic.com:qybgforeign:qybgforeign5 X-QQ-Bgrelay: 1 Subject: [dpdk-dev] [PATCH 34/37] net/txgbe: add security session create operation X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Add support to configure a security session. 
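For context (not part of this patch; the SPI and key values are placeholders), a sketch of the session_conf an application might fill in for an egress inline-crypto SA that the new txgbe_crypto_create_session() would accept. The key buffer carries the 16-byte AES-GCM key immediately followed by the 4-byte salt, matching how the handler reads key and salt.

#include <rte_crypto_sym.h>
#include <rte_security.h>

/* 16-byte AES-128-GCM key followed by the 4-byte salt, filled by the app */
static uint8_t aes_gcm_key_salt[20];

static struct rte_crypto_sym_xform aead_xform = {
	.type = RTE_CRYPTO_SYM_XFORM_AEAD,
	.aead = {
		.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
		.algo = RTE_CRYPTO_AEAD_AES_GCM,
		.key = { .data = aes_gcm_key_salt, .length = 16 },
		.iv = { .offset = 0, .length = 12 },
		.digest_length = 16,
	},
};

static const struct rte_security_session_conf sess_conf = {
	.action_type = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
	.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
	.ipsec = {
		.spi = 0x1234,	/* placeholder SPI */
		.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
		.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
		.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
	},
	.crypto_xform = &aead_xform,
};

The conf is then passed to rte_security_session_create() on the port's security context; DEV_TX_OFFLOAD_SECURITY (or DEV_RX_OFFLOAD_SECURITY for ingress) must already be enabled, otherwise the handler returns -ENOTSUP.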
Signed-off-by: Jiawen Wu --- drivers/net/txgbe/txgbe_ethdev.h | 6 + drivers/net/txgbe/txgbe_ipsec.c | 250 +++++++++++++++++++++++++++++++ drivers/net/txgbe/txgbe_ipsec.h | 66 ++++++++ 3 files changed, 322 insertions(+) diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h index 1e7fa1f87..eb68e12b2 100644 --- a/drivers/net/txgbe/txgbe_ethdev.h +++ b/drivers/net/txgbe/txgbe_ethdev.h @@ -367,6 +367,9 @@ struct txgbe_adapter { struct txgbe_filter_info filter; struct txgbe_l2_tn_info l2_tn; struct txgbe_bw_conf bw_conf; +#ifdef RTE_LIB_SECURITY + struct txgbe_ipsec ipsec; +#endif bool rx_bulk_alloc_allowed; struct rte_timecounter systime_tc; struct rte_timecounter rx_tstamp_tc; @@ -428,6 +431,9 @@ struct txgbe_adapter { #define TXGBE_DEV_TM_CONF(dev) \ (&((struct txgbe_adapter *)(dev)->data->dev_private)->tm_conf) +#define TXGBE_DEV_IPSEC(dev) \ + (&((struct txgbe_adapter *)(dev)->data->dev_private)->ipsec) + /* * RX/TX function prototypes */ diff --git a/drivers/net/txgbe/txgbe_ipsec.c b/drivers/net/txgbe/txgbe_ipsec.c index b21bba237..7501e25af 100644 --- a/drivers/net/txgbe/txgbe_ipsec.c +++ b/drivers/net/txgbe/txgbe_ipsec.c @@ -13,6 +13,255 @@ #include "txgbe_ethdev.h" #include "txgbe_ipsec.h" +#define CMP_IP(a, b) (\ + (a).ipv6[0] == (b).ipv6[0] && \ + (a).ipv6[1] == (b).ipv6[1] && \ + (a).ipv6[2] == (b).ipv6[2] && \ + (a).ipv6[3] == (b).ipv6[3]) + +static int +txgbe_crypto_add_sa(struct txgbe_crypto_session *ic_session) +{ + struct rte_eth_dev *dev = ic_session->dev; + struct txgbe_hw *hw = TXGBE_DEV_HW(dev); + struct txgbe_ipsec *priv = TXGBE_DEV_IPSEC(dev); + uint32_t reg_val; + int sa_index = -1; + + if (ic_session->op == TXGBE_OP_AUTHENTICATED_DECRYPTION) { + int i, ip_index = -1; + uint8_t *key; + + /* Find a match in the IP table*/ + for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) { + if (CMP_IP(priv->rx_ip_tbl[i].ip, + ic_session->dst_ip)) { + ip_index = i; + break; + } + } + /* If no match, find a free entry in the IP table*/ + if (ip_index < 0) { + for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) { + if (priv->rx_ip_tbl[i].ref_count == 0) { + ip_index = i; + break; + } + } + } + + /* Fail if no match and no free entries*/ + if (ip_index < 0) { + PMD_DRV_LOG(ERR, + "No free entry left in the Rx IP table\n"); + return -1; + } + + /* Find a free entry in the SA table*/ + for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) { + if (priv->rx_sa_tbl[i].used == 0) { + sa_index = i; + break; + } + } + /* Fail if no free entries*/ + if (sa_index < 0) { + PMD_DRV_LOG(ERR, + "No free entry left in the Rx SA table\n"); + return -1; + } + + priv->rx_ip_tbl[ip_index].ip.ipv6[0] = + ic_session->dst_ip.ipv6[0]; + priv->rx_ip_tbl[ip_index].ip.ipv6[1] = + ic_session->dst_ip.ipv6[1]; + priv->rx_ip_tbl[ip_index].ip.ipv6[2] = + ic_session->dst_ip.ipv6[2]; + priv->rx_ip_tbl[ip_index].ip.ipv6[3] = + ic_session->dst_ip.ipv6[3]; + priv->rx_ip_tbl[ip_index].ref_count++; + + priv->rx_sa_tbl[sa_index].spi = ic_session->spi; + priv->rx_sa_tbl[sa_index].ip_index = ip_index; + priv->rx_sa_tbl[sa_index].mode = IPSRXMOD_VALID; + if (ic_session->op == TXGBE_OP_AUTHENTICATED_DECRYPTION) + priv->rx_sa_tbl[sa_index].mode |= + (IPSRXMOD_PROTO | IPSRXMOD_DECRYPT); + if (ic_session->dst_ip.type == IPv6) { + priv->rx_sa_tbl[sa_index].mode |= IPSRXMOD_IPV6; + priv->rx_ip_tbl[ip_index].ip.type = IPv6; + } else if (ic_session->dst_ip.type == IPv4) { + priv->rx_ip_tbl[ip_index].ip.type = IPv4; + } + priv->rx_sa_tbl[sa_index].used = 1; + + /* write IP table entry*/ + reg_val = TXGBE_IPSRXIDX_ENA | TXGBE_IPSRXIDX_WRITE 
| + TXGBE_IPSRXIDX_TB_IP | (ip_index << 3); + if (priv->rx_ip_tbl[ip_index].ip.type == IPv4) { + wr32(hw, TXGBE_IPSRXADDR(0), 0); + wr32(hw, TXGBE_IPSRXADDR(1), 0); + wr32(hw, TXGBE_IPSRXADDR(2), 0); + wr32(hw, TXGBE_IPSRXADDR(3), + priv->rx_ip_tbl[ip_index].ip.ipv4); + } else { + wr32(hw, TXGBE_IPSRXADDR(0), + priv->rx_ip_tbl[ip_index].ip.ipv6[0]); + wr32(hw, TXGBE_IPSRXADDR(1), + priv->rx_ip_tbl[ip_index].ip.ipv6[1]); + wr32(hw, TXGBE_IPSRXADDR(2), + priv->rx_ip_tbl[ip_index].ip.ipv6[2]); + wr32(hw, TXGBE_IPSRXADDR(3), + priv->rx_ip_tbl[ip_index].ip.ipv6[3]); + } + wr32w(hw, TXGBE_IPSRXIDX, reg_val, TXGBE_IPSRXIDX_WRITE, 1000); + + /* write SPI table entry*/ + reg_val = TXGBE_IPSRXIDX_ENA | TXGBE_IPSRXIDX_WRITE | + TXGBE_IPSRXIDX_TB_SPI | (sa_index << 3); + wr32(hw, TXGBE_IPSRXSPI, + priv->rx_sa_tbl[sa_index].spi); + wr32(hw, TXGBE_IPSRXADDRIDX, + priv->rx_sa_tbl[sa_index].ip_index); + wr32w(hw, TXGBE_IPSRXIDX, reg_val, TXGBE_IPSRXIDX_WRITE, 1000); + + /* write Key table entry*/ + key = malloc(ic_session->key_len); + if (!key) + return -ENOMEM; + + memcpy(key, ic_session->key, ic_session->key_len); + + reg_val = TXGBE_IPSRXIDX_ENA | TXGBE_IPSRXIDX_WRITE | + TXGBE_IPSRXIDX_TB_KEY | (sa_index << 3); + wr32(hw, TXGBE_IPSRXKEY(0), + rte_cpu_to_be_32(*(uint32_t *)&key[12])); + wr32(hw, TXGBE_IPSRXKEY(1), + rte_cpu_to_be_32(*(uint32_t *)&key[8])); + wr32(hw, TXGBE_IPSRXKEY(2), + rte_cpu_to_be_32(*(uint32_t *)&key[4])); + wr32(hw, TXGBE_IPSRXKEY(3), + rte_cpu_to_be_32(*(uint32_t *)&key[0])); + wr32(hw, TXGBE_IPSRXSALT, + rte_cpu_to_be_32(ic_session->salt)); + wr32(hw, TXGBE_IPSRXMODE, + priv->rx_sa_tbl[sa_index].mode); + wr32w(hw, TXGBE_IPSRXIDX, reg_val, TXGBE_IPSRXIDX_WRITE, 1000); + + free(key); + } else { /* sess->dir == RTE_CRYPTO_OUTBOUND */ + uint8_t *key; + int i; + + /* Find a free entry in the SA table*/ + for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) { + if (priv->tx_sa_tbl[i].used == 0) { + sa_index = i; + break; + } + } + /* Fail if no free entries*/ + if (sa_index < 0) { + PMD_DRV_LOG(ERR, + "No free entry left in the Tx SA table\n"); + return -1; + } + + priv->tx_sa_tbl[sa_index].spi = + rte_cpu_to_be_32(ic_session->spi); + priv->tx_sa_tbl[i].used = 1; + ic_session->sa_index = sa_index; + + key = malloc(ic_session->key_len); + if (!key) + return -ENOMEM; + + memcpy(key, ic_session->key, ic_session->key_len); + + /* write Key table entry*/ + reg_val = TXGBE_IPSRXIDX_ENA | + TXGBE_IPSRXIDX_WRITE | (sa_index << 3); + wr32(hw, TXGBE_IPSTXKEY(0), + rte_cpu_to_be_32(*(uint32_t *)&key[12])); + wr32(hw, TXGBE_IPSTXKEY(1), + rte_cpu_to_be_32(*(uint32_t *)&key[8])); + wr32(hw, TXGBE_IPSTXKEY(2), + rte_cpu_to_be_32(*(uint32_t *)&key[4])); + wr32(hw, TXGBE_IPSTXKEY(3), + rte_cpu_to_be_32(*(uint32_t *)&key[0])); + wr32(hw, TXGBE_IPSTXSALT, + rte_cpu_to_be_32(ic_session->salt)); + wr32w(hw, TXGBE_IPSTXIDX, reg_val, TXGBE_IPSTXIDX_WRITE, 1000); + + free(key); + } + + return 0; +} + +static int +txgbe_crypto_create_session(void *device, + struct rte_security_session_conf *conf, + struct rte_security_session *session, + struct rte_mempool *mempool) +{ + struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device; + struct txgbe_crypto_session *ic_session = NULL; + struct rte_crypto_aead_xform *aead_xform; + struct rte_eth_conf *dev_conf = ð_dev->data->dev_conf; + + if (rte_mempool_get(mempool, (void **)&ic_session)) { + PMD_DRV_LOG(ERR, "Cannot get object from ic_session mempool"); + return -ENOMEM; + } + + if (conf->crypto_xform->type != RTE_CRYPTO_SYM_XFORM_AEAD || + conf->crypto_xform->aead.algo != 
+ RTE_CRYPTO_AEAD_AES_GCM) { + PMD_DRV_LOG(ERR, "Unsupported crypto transformation mode\n"); + rte_mempool_put(mempool, (void *)ic_session); + return -ENOTSUP; + } + aead_xform = &conf->crypto_xform->aead; + + if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) { + if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) { + ic_session->op = TXGBE_OP_AUTHENTICATED_DECRYPTION; + } else { + PMD_DRV_LOG(ERR, "IPsec decryption not enabled\n"); + rte_mempool_put(mempool, (void *)ic_session); + return -ENOTSUP; + } + } else { + if (dev_conf->txmode.offloads & DEV_TX_OFFLOAD_SECURITY) { + ic_session->op = TXGBE_OP_AUTHENTICATED_ENCRYPTION; + } else { + PMD_DRV_LOG(ERR, "IPsec encryption not enabled\n"); + rte_mempool_put(mempool, (void *)ic_session); + return -ENOTSUP; + } + } + + ic_session->key = aead_xform->key.data; + ic_session->key_len = aead_xform->key.length; + memcpy(&ic_session->salt, + &aead_xform->key.data[aead_xform->key.length], 4); + ic_session->spi = conf->ipsec.spi; + ic_session->dev = eth_dev; + + set_sec_session_private_data(session, ic_session); + + if (ic_session->op == TXGBE_OP_AUTHENTICATED_ENCRYPTION) { + if (txgbe_crypto_add_sa(ic_session)) { + PMD_DRV_LOG(ERR, "Failed to add SA\n"); + rte_mempool_put(mempool, (void *)ic_session); + return -EPERM; + } + } + + return 0; +} + static const struct rte_security_capability * txgbe_crypto_capabilities_get(void *device __rte_unused) { @@ -140,6 +389,7 @@ txgbe_crypto_capabilities_get(void *device __rte_unused) } static struct rte_security_ops txgbe_security_ops = { + .session_create = txgbe_crypto_create_session, .capabilities_get = txgbe_crypto_capabilities_get }; diff --git a/drivers/net/txgbe/txgbe_ipsec.h b/drivers/net/txgbe/txgbe_ipsec.h index f58ebab3d..c94775636 100644 --- a/drivers/net/txgbe/txgbe_ipsec.h +++ b/drivers/net/txgbe/txgbe_ipsec.h @@ -5,9 +5,75 @@ #ifndef TXGBE_IPSEC_H_ #define TXGBE_IPSEC_H_ +#include #include #include +#define IPSRXMOD_VALID 0x00000001 +#define IPSRXMOD_PROTO 0x00000004 +#define IPSRXMOD_DECRYPT 0x00000008 +#define IPSRXMOD_IPV6 0x00000010 + +#define IPSEC_MAX_RX_IP_COUNT 128 +#define IPSEC_MAX_SA_COUNT 1024 + +enum txgbe_operation { + TXGBE_OP_AUTHENTICATED_ENCRYPTION, + TXGBE_OP_AUTHENTICATED_DECRYPTION +}; + +/** + * Generic IP address structure + * TODO: Find better location for this rte_net.h possibly. 
+ **/ +struct ipaddr { + enum ipaddr_type { + IPv4, + IPv6 + } type; + /**< IP Address Type - IPv4/IPv6 */ + + union { + uint32_t ipv4; + uint32_t ipv6[4]; + }; +}; + +/** inline crypto crypto private session structure */ +struct txgbe_crypto_session { + enum txgbe_operation op; + const uint8_t *key; + uint32_t key_len; + uint32_t salt; + uint32_t sa_index; + uint32_t spi; + struct ipaddr src_ip; + struct ipaddr dst_ip; + struct rte_eth_dev *dev; +} __rte_cache_aligned; + +struct txgbe_crypto_rx_ip_table { + struct ipaddr ip; + uint16_t ref_count; +}; +struct txgbe_crypto_rx_sa_table { + uint32_t spi; + uint32_t ip_index; + uint8_t mode; + uint8_t used; +}; + +struct txgbe_crypto_tx_sa_table { + uint32_t spi; + uint8_t used; +}; + +struct txgbe_ipsec { + struct txgbe_crypto_rx_ip_table rx_ip_tbl[IPSEC_MAX_RX_IP_COUNT]; + struct txgbe_crypto_rx_sa_table rx_sa_tbl[IPSEC_MAX_SA_COUNT]; + struct txgbe_crypto_tx_sa_table tx_sa_tbl[IPSEC_MAX_SA_COUNT]; +}; + int txgbe_ipsec_ctx_create(struct rte_eth_dev *dev); #endif /*TXGBE_IPSEC_H_*/ From patchwork Tue Nov 3 10:08:16 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiawen Wu X-Patchwork-Id: 83556 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 33831A0521; Tue, 3 Nov 2020 11:19:37 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id E457ECACF; Tue, 3 Nov 2020 11:07:58 +0100 (CET) Received: from smtpbg501.qq.com (smtpbg501.qq.com [203.205.250.101]) by dpdk.org (Postfix) with ESMTP id 70A5DCA3B for ; Tue, 3 Nov 2020 11:07:39 +0100 (CET) X-QQ-mid: bizesmtp26t1604398054tvqnks9g Received: from localhost.localdomain.com (unknown [183.129.236.74]) by esmtp10.qq.com (ESMTP) with id ; Tue, 03 Nov 2020 18:07:34 +0800 (CST) X-QQ-SSF: 01400000002000C0C000B00A0000000 X-QQ-FEAT: Tj/mR7p3N/R3trrmUyL636mlki0B7X+RT8QoaplUAEs+rAYqZxQ7B8ArgIJjZ BTHPeNP7aztcYHLswyYrqK0WQjvHYDE57gzJlMp+017Hj1oDt8bqFlsA8ipI2Wjn1rwiA+u KpTRl5IxOIqoTEN+9p+gzRtCPPKRoHsR0wIV4z+BRp+PwHCk3Wy4u3W0D7C14uFFF6mWHSN R/6Vz/Wf0oqrVVYLaA2O3+0DhXdfndH1lMk3ENdm4Pc0Etiie3zukC/5YvldtW4Hmm8gWN+ nvDLoSXAfN44aNAiKrYMMO5ep0HpDUigpYg6F0CEErhNxoribGAhgwY0F7nky2dpOIQM6V7 A/QTsHQC1ESKRwMXZcj5/vDar4B4/ocNvhSQZ7I X-QQ-GoodBg: 2 From: Jiawen Wu To: dev@dpdk.org Cc: Jiawen Wu Date: Tue, 3 Nov 2020 18:08:16 +0800 Message-Id: <20201103100818.311881-36-jiawenwu@trustnetic.com> X-Mailer: git-send-email 2.18.4 In-Reply-To: <20201103100818.311881-1-jiawenwu@trustnetic.com> References: <20201103100818.311881-1-jiawenwu@trustnetic.com> X-QQ-SENDSIZE: 520 Feedback-ID: bizesmtp:trustnetic.com:qybgforeign:qybgforeign6 X-QQ-Bgrelay: 1 Subject: [dpdk-dev] [PATCH 35/37] net/txgbe: support security session destroy X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Add support to clear a security session's private data, get the size of a security session, add update the mbuf with provided metadata. 
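For context (not part of this patch; the helper name is illustrative), a sketch of the per-packet Tx path an application would use once an egress session exists. rte_security_set_pkt_metadata() lands in the txgbe_crypto_update_mb() hook added below, which stores the SA index and ESP pad length in the mbuf's security dynfield for later use by the Tx descriptor setup.

#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_security.h>

/* illustrative helper: send one mbuf through the inline-crypto Tx path */
static inline int
app_tx_inline_crypto(uint16_t port_id, struct rte_security_session *sess,
		     struct rte_mbuf *m)
{
	struct rte_security_ctx *ctx = rte_eth_dev_get_sec_ctx(port_id);

	/* let the PMD stash SA index and pad length in the mbuf dynfield */
	if (rte_security_set_pkt_metadata(ctx, sess, m, NULL) != 0)
		return -1;

	/* request inline ESP processing for this packet */
	m->ol_flags |= PKT_TX_SEC_OFFLOAD;

	return rte_eth_tx_burst(port_id, 0, &m, 1) == 1 ? 0 : -1;
}
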
Signed-off-by: Jiawen Wu --- drivers/net/txgbe/txgbe_ipsec.c | 167 ++++++++++++++++++++++++++++++++ drivers/net/txgbe/txgbe_ipsec.h | 15 +++ 2 files changed, 182 insertions(+) diff --git a/drivers/net/txgbe/txgbe_ipsec.c b/drivers/net/txgbe/txgbe_ipsec.c index 7501e25af..0bdd1c061 100644 --- a/drivers/net/txgbe/txgbe_ipsec.c +++ b/drivers/net/txgbe/txgbe_ipsec.c @@ -199,6 +199,106 @@ txgbe_crypto_add_sa(struct txgbe_crypto_session *ic_session) return 0; } +static int +txgbe_crypto_remove_sa(struct rte_eth_dev *dev, + struct txgbe_crypto_session *ic_session) +{ + struct txgbe_hw *hw = TXGBE_DEV_HW(dev); + struct txgbe_ipsec *priv = TXGBE_DEV_IPSEC(dev); + uint32_t reg_val; + int sa_index = -1; + + if (ic_session->op == TXGBE_OP_AUTHENTICATED_DECRYPTION) { + int i, ip_index = -1; + + /* Find a match in the IP table*/ + for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) { + if (CMP_IP(priv->rx_ip_tbl[i].ip, ic_session->dst_ip)) { + ip_index = i; + break; + } + } + + /* Fail if no match*/ + if (ip_index < 0) { + PMD_DRV_LOG(ERR, + "Entry not found in the Rx IP table\n"); + return -1; + } + + /* Find a free entry in the SA table*/ + for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) { + if (priv->rx_sa_tbl[i].spi == + rte_cpu_to_be_32(ic_session->spi)) { + sa_index = i; + break; + } + } + /* Fail if no match*/ + if (sa_index < 0) { + PMD_DRV_LOG(ERR, + "Entry not found in the Rx SA table\n"); + return -1; + } + + /* Disable and clear Rx SPI and key table table entryes*/ + reg_val = TXGBE_IPSRXIDX_WRITE | + TXGBE_IPSRXIDX_TB_SPI | (sa_index << 3); + wr32(hw, TXGBE_IPSRXSPI, 0); + wr32(hw, TXGBE_IPSRXADDRIDX, 0); + wr32w(hw, TXGBE_IPSRXIDX, reg_val, TXGBE_IPSRXIDX_WRITE, 1000); + reg_val = TXGBE_IPSRXIDX_WRITE | + TXGBE_IPSRXIDX_TB_KEY | (sa_index << 3); + wr32(hw, TXGBE_IPSRXKEY(0), 0); + wr32(hw, TXGBE_IPSRXKEY(1), 0); + wr32(hw, TXGBE_IPSRXKEY(2), 0); + wr32(hw, TXGBE_IPSRXKEY(3), 0); + wr32(hw, TXGBE_IPSRXSALT, 0); + wr32(hw, TXGBE_IPSRXMODE, 0); + wr32w(hw, TXGBE_IPSRXIDX, reg_val, TXGBE_IPSRXIDX_WRITE, 1000); + priv->rx_sa_tbl[sa_index].used = 0; + + /* If last used then clear the IP table entry*/ + priv->rx_ip_tbl[ip_index].ref_count--; + if (priv->rx_ip_tbl[ip_index].ref_count == 0) { + reg_val = TXGBE_IPSRXIDX_WRITE | TXGBE_IPSRXIDX_TB_IP | + (ip_index << 3); + wr32(hw, TXGBE_IPSRXADDR(0), 0); + wr32(hw, TXGBE_IPSRXADDR(1), 0); + wr32(hw, TXGBE_IPSRXADDR(2), 0); + wr32(hw, TXGBE_IPSRXADDR(3), 0); + } + } else { /* session->dir == RTE_CRYPTO_OUTBOUND */ + int i; + + /* Find a match in the SA table*/ + for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) { + if (priv->tx_sa_tbl[i].spi == + rte_cpu_to_be_32(ic_session->spi)) { + sa_index = i; + break; + } + } + /* Fail if no match entries*/ + if (sa_index < 0) { + PMD_DRV_LOG(ERR, + "Entry not found in the Tx SA table\n"); + return -1; + } + reg_val = TXGBE_IPSRXIDX_WRITE | (sa_index << 3); + wr32(hw, TXGBE_IPSTXKEY(0), 0); + wr32(hw, TXGBE_IPSTXKEY(1), 0); + wr32(hw, TXGBE_IPSTXKEY(2), 0); + wr32(hw, TXGBE_IPSTXKEY(3), 0); + wr32(hw, TXGBE_IPSTXSALT, 0); + wr32w(hw, TXGBE_IPSTXIDX, reg_val, TXGBE_IPSTXIDX_WRITE, 1000); + + priv->tx_sa_tbl[sa_index].used = 0; + } + + return 0; +} + static int txgbe_crypto_create_session(void *device, struct rte_security_session_conf *conf, @@ -262,6 +362,70 @@ txgbe_crypto_create_session(void *device, return 0; } +static unsigned int +txgbe_crypto_session_get_size(__rte_unused void *device) +{ + return sizeof(struct txgbe_crypto_session); +} + +static int +txgbe_crypto_remove_session(void *device, + struct rte_security_session 
*session) +{ + struct rte_eth_dev *eth_dev = device; + struct txgbe_crypto_session *ic_session = + (struct txgbe_crypto_session *) + get_sec_session_private_data(session); + struct rte_mempool *mempool = rte_mempool_from_obj(ic_session); + + if (eth_dev != ic_session->dev) { + PMD_DRV_LOG(ERR, "Session not bound to this device\n"); + return -ENODEV; + } + + if (txgbe_crypto_remove_sa(eth_dev, ic_session)) { + PMD_DRV_LOG(ERR, "Failed to remove session\n"); + return -EFAULT; + } + + rte_mempool_put(mempool, (void *)ic_session); + + return 0; +} + +static inline uint8_t +txgbe_crypto_compute_pad_len(struct rte_mbuf *m) +{ + if (m->nb_segs == 1) { + /* 16 bytes ICV + 2 bytes ESP trailer + payload padding size + * payload padding size is stored at + */ + uint8_t *esp_pad_len = rte_pktmbuf_mtod_offset(m, uint8_t *, + rte_pktmbuf_pkt_len(m) - + (ESP_TRAILER_SIZE + ESP_ICV_SIZE)); + return *esp_pad_len + ESP_TRAILER_SIZE + ESP_ICV_SIZE; + } + return 0; +} + +static int +txgbe_crypto_update_mb(void *device __rte_unused, + struct rte_security_session *session, + struct rte_mbuf *m, void *params __rte_unused) +{ + struct txgbe_crypto_session *ic_session = + get_sec_session_private_data(session); + if (ic_session->op == TXGBE_OP_AUTHENTICATED_ENCRYPTION) { + union txgbe_crypto_tx_desc_md *mdata = + (union txgbe_crypto_tx_desc_md *) + rte_security_dynfield(m); + mdata->enc = 1; + mdata->sa_idx = ic_session->sa_index; + mdata->pad_len = txgbe_crypto_compute_pad_len(m); + } + return 0; +} + static const struct rte_security_capability * txgbe_crypto_capabilities_get(void *device __rte_unused) { @@ -390,6 +554,9 @@ txgbe_crypto_capabilities_get(void *device __rte_unused) static struct rte_security_ops txgbe_security_ops = { .session_create = txgbe_crypto_create_session, + .session_get_size = txgbe_crypto_session_get_size, + .session_destroy = txgbe_crypto_remove_session, + .set_pkt_metadata = txgbe_crypto_update_mb, .capabilities_get = txgbe_crypto_capabilities_get }; diff --git a/drivers/net/txgbe/txgbe_ipsec.h b/drivers/net/txgbe/txgbe_ipsec.h index c94775636..d022a255f 100644 --- a/drivers/net/txgbe/txgbe_ipsec.h +++ b/drivers/net/txgbe/txgbe_ipsec.h @@ -17,6 +17,9 @@ #define IPSEC_MAX_RX_IP_COUNT 128 #define IPSEC_MAX_SA_COUNT 1024 +#define ESP_ICV_SIZE 16 +#define ESP_TRAILER_SIZE 2 + enum txgbe_operation { TXGBE_OP_AUTHENTICATED_ENCRYPTION, TXGBE_OP_AUTHENTICATED_DECRYPTION @@ -68,6 +71,18 @@ struct txgbe_crypto_tx_sa_table { uint8_t used; }; +union txgbe_crypto_tx_desc_md { + uint64_t data; + struct { + /**< SA table index */ + uint32_t sa_idx; + /**< ICV and ESP trailer length */ + uint8_t pad_len; + /**< enable encryption */ + uint8_t enc; + }; +}; + struct txgbe_ipsec { struct txgbe_crypto_rx_ip_table rx_ip_tbl[IPSEC_MAX_RX_IP_COUNT]; struct txgbe_crypto_rx_sa_table rx_sa_tbl[IPSEC_MAX_SA_COUNT]; From patchwork Tue Nov 3 10:08:17 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiawen Wu X-Patchwork-Id: 83558 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 55CCBA0521; Tue, 3 Nov 2020 11:20:15 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 3BD21CAF1; Tue, 3 Nov 2020 11:08:02 +0100 (CET) Received: from smtpbg506.qq.com (smtpbg506.qq.com [203.205.250.33]) by dpdk.org (Postfix) with ESMTP id 
4904CC9B8 for ; Tue, 3 Nov 2020 11:07:41 +0100 (CET) X-QQ-mid: bizesmtp26t1604398055tkako3nl Received: from localhost.localdomain.com (unknown [183.129.236.74]) by esmtp10.qq.com (ESMTP) with id ; Tue, 03 Nov 2020 18:07:35 +0800 (CST) X-QQ-SSF: 01400000002000C0C000B00A0000000 X-QQ-FEAT: ql6Zc2TW8Jx/mwN6AB2lO145rj8KkT//p47nEDtKF424DCtKvTFip5vz8PHuR qXRem3Uomha+I2sgJ9P0s2sJ+gWOEqU84uzSF9jLk9gsfLK5iyaeXZNBBajcadsRliPWjit /2w3/tGnztRh2seRJfI3VUPgIK/zUMPHuDMGLyJygppKZ7+fMVuQqwbbRPJ4EW4dD0rV3AX pSubyHQtoA3VmPfglzB336OwkMvBV5KxXqqi2JtvwxbatCwhdC7W74CH86m++ASRcNN4fyx oZcfltQwKeeb/JpcWE0ILr49MgmE7hWrZJczMWW9PGppEn5jC2p/1drCRtHrbkiq4d0sitS MZdsxXrft0OTKA4uflFk/m88rYNmA== X-QQ-GoodBg: 2 From: Jiawen Wu To: dev@dpdk.org Cc: Jiawen Wu Date: Tue, 3 Nov 2020 18:08:17 +0800 Message-Id: <20201103100818.311881-37-jiawenwu@trustnetic.com> X-Mailer: git-send-email 2.18.4 In-Reply-To: <20201103100818.311881-1-jiawenwu@trustnetic.com> References: <20201103100818.311881-1-jiawenwu@trustnetic.com> X-QQ-SENDSIZE: 520 Feedback-ID: bizesmtp:trustnetic.com:qybgforeign:qybgforeign7 X-QQ-Bgrelay: 1 Subject: [dpdk-dev] [PATCH 36/37] net/txgbe: add security offload in Rx and Tx process X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Add security offload in Rx and Tx process. Signed-off-by: Jiawen Wu --- drivers/net/txgbe/txgbe_ipsec.c | 106 ++++++++++++++++++++++++++++++++ drivers/net/txgbe/txgbe_ipsec.h | 1 + drivers/net/txgbe/txgbe_rxtx.c | 93 +++++++++++++++++++++++++++- drivers/net/txgbe/txgbe_rxtx.h | 13 ++++ 4 files changed, 211 insertions(+), 2 deletions(-) diff --git a/drivers/net/txgbe/txgbe_ipsec.c b/drivers/net/txgbe/txgbe_ipsec.c index 0bdd1c061..f8c54f3d4 100644 --- a/drivers/net/txgbe/txgbe_ipsec.c +++ b/drivers/net/txgbe/txgbe_ipsec.c @@ -19,6 +19,55 @@ (a).ipv6[2] == (b).ipv6[2] && \ (a).ipv6[3] == (b).ipv6[3]) +static void +txgbe_crypto_clear_ipsec_tables(struct rte_eth_dev *dev) +{ + struct txgbe_hw *hw = TXGBE_DEV_HW(dev); + struct txgbe_ipsec *priv = TXGBE_DEV_IPSEC(dev); + int i = 0; + + /* clear Rx IP table*/ + for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) { + uint16_t index = i << 3; + uint32_t reg_val = TXGBE_IPSRXIDX_WRITE | + TXGBE_IPSRXIDX_TB_IP | index; + wr32(hw, TXGBE_IPSRXADDR(0), 0); + wr32(hw, TXGBE_IPSRXADDR(1), 0); + wr32(hw, TXGBE_IPSRXADDR(2), 0); + wr32(hw, TXGBE_IPSRXADDR(3), 0); + wr32w(hw, TXGBE_IPSRXIDX, reg_val, TXGBE_IPSRXIDX_WRITE, 1000); + } + + /* clear Rx SPI and Rx/Tx SA tables*/ + for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) { + uint32_t index = i << 3; + uint32_t reg_val = TXGBE_IPSRXIDX_WRITE | + TXGBE_IPSRXIDX_TB_SPI | index; + wr32(hw, TXGBE_IPSRXSPI, 0); + wr32(hw, TXGBE_IPSRXADDRIDX, 0); + wr32w(hw, TXGBE_IPSRXIDX, reg_val, TXGBE_IPSRXIDX_WRITE, 1000); + reg_val = TXGBE_IPSRXIDX_WRITE | TXGBE_IPSRXIDX_TB_KEY | index; + wr32(hw, TXGBE_IPSRXKEY(0), 0); + wr32(hw, TXGBE_IPSRXKEY(1), 0); + wr32(hw, TXGBE_IPSRXKEY(2), 0); + wr32(hw, TXGBE_IPSRXKEY(3), 0); + wr32(hw, TXGBE_IPSRXSALT, 0); + wr32(hw, TXGBE_IPSRXMODE, 0); + wr32w(hw, TXGBE_IPSRXIDX, reg_val, TXGBE_IPSRXIDX_WRITE, 1000); + reg_val = TXGBE_IPSTXIDX_WRITE | index; + wr32(hw, TXGBE_IPSTXKEY(0), 0); + wr32(hw, TXGBE_IPSTXKEY(1), 0); + wr32(hw, TXGBE_IPSTXKEY(2), 0); + wr32(hw, TXGBE_IPSTXKEY(3), 0); + wr32(hw, TXGBE_IPSTXSALT, 0); + wr32w(hw, TXGBE_IPSTXIDX, reg_val, TXGBE_IPSTXIDX_WRITE, 1000); + } + + memset(priv->rx_ip_tbl, 0, 
sizeof(priv->rx_ip_tbl)); + memset(priv->rx_sa_tbl, 0, sizeof(priv->rx_sa_tbl)); + memset(priv->tx_sa_tbl, 0, sizeof(priv->tx_sa_tbl)); +} + static int txgbe_crypto_add_sa(struct txgbe_crypto_session *ic_session) { @@ -552,6 +601,63 @@ txgbe_crypto_capabilities_get(void *device __rte_unused) return txgbe_security_capabilities; } +int +txgbe_crypto_enable_ipsec(struct rte_eth_dev *dev) +{ + struct txgbe_hw *hw = TXGBE_DEV_HW(dev); + uint32_t reg; + uint64_t rx_offloads; + uint64_t tx_offloads; + + rx_offloads = dev->data->dev_conf.rxmode.offloads; + tx_offloads = dev->data->dev_conf.txmode.offloads; + + /* sanity checks */ + if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) { + PMD_DRV_LOG(ERR, "RSC and IPsec not supported"); + return -1; + } + if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) { + PMD_DRV_LOG(ERR, "HW CRC strip needs to be enabled for IPsec"); + return -1; + } + + /* Set TXGBE_SECTXBUFFAF to 0x14 as required in the datasheet*/ + wr32(hw, TXGBE_SECTXBUFAF, 0x14); + + /* IFG needs to be set to 3 when we are using security. Otherwise a Tx + * hang will occur with heavy traffic. + */ + reg = rd32(hw, TXGBE_SECTXIFG); + reg = (reg & ~TXGBE_SECTXIFG_MIN_MASK) | TXGBE_SECTXIFG_MIN(0x3); + wr32(hw, TXGBE_SECTXIFG, reg); + + reg = rd32(hw, TXGBE_SECRXCTL); + reg |= TXGBE_SECRXCTL_CRCSTRIP; + wr32(hw, TXGBE_SECRXCTL, reg); + + if (rx_offloads & DEV_RX_OFFLOAD_SECURITY) { + wr32m(hw, TXGBE_SECRXCTL, TXGBE_SECRXCTL_ODSA, 0); + reg = rd32m(hw, TXGBE_SECRXCTL, TXGBE_SECRXCTL_ODSA); + if (reg != 0) { + PMD_DRV_LOG(ERR, "Error enabling Rx Crypto"); + return -1; + } + } + if (tx_offloads & DEV_TX_OFFLOAD_SECURITY) { + wr32(hw, TXGBE_SECTXCTL, TXGBE_SECTXCTL_STFWD); + reg = rd32(hw, TXGBE_SECTXCTL); + if (reg != TXGBE_SECTXCTL_STFWD) { + PMD_DRV_LOG(ERR, "Error enabling Rx Crypto"); + return -1; + } + } + + txgbe_crypto_clear_ipsec_tables(dev); + + return 0; +} + static struct rte_security_ops txgbe_security_ops = { .session_create = txgbe_crypto_create_session, .session_get_size = txgbe_crypto_session_get_size, diff --git a/drivers/net/txgbe/txgbe_ipsec.h b/drivers/net/txgbe/txgbe_ipsec.h index d022a255f..54c27d8ce 100644 --- a/drivers/net/txgbe/txgbe_ipsec.h +++ b/drivers/net/txgbe/txgbe_ipsec.h @@ -90,5 +90,6 @@ struct txgbe_ipsec { }; int txgbe_ipsec_ctx_create(struct rte_eth_dev *dev); +int txgbe_crypto_enable_ipsec(struct rte_eth_dev *dev); #endif /*TXGBE_IPSEC_H_*/ diff --git a/drivers/net/txgbe/txgbe_rxtx.c b/drivers/net/txgbe/txgbe_rxtx.c index f99924a3f..18588b985 100644 --- a/drivers/net/txgbe/txgbe_rxtx.c +++ b/drivers/net/txgbe/txgbe_rxtx.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -60,6 +61,9 @@ static const u64 TXGBE_TX_OFFLOAD_MASK = (PKT_TX_IP_CKSUM | PKT_TX_MACSEC | #endif PKT_TX_OUTER_IP_CKSUM | +#ifdef RTE_LIB_SECURITY + PKT_TX_SEC_OFFLOAD | +#endif TXGBE_TX_IEEE1588_TMST); #define TXGBE_TX_OFFLOAD_NOTSUP_MASK \ @@ -314,7 +318,8 @@ txgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts, static inline void txgbe_set_xmit_ctx(struct txgbe_tx_queue *txq, volatile struct txgbe_tx_ctx_desc *ctx_txd, - uint64_t ol_flags, union txgbe_tx_offload tx_offload) + uint64_t ol_flags, union txgbe_tx_offload tx_offload, + __rte_unused uint64_t *mdata) { union txgbe_tx_offload tx_offload_mask; uint32_t type_tucmd_mlhl; @@ -408,6 +413,19 @@ txgbe_set_xmit_ctx(struct txgbe_tx_queue *txq, vlan_macip_lens |= TXGBE_TXD_VLAN(tx_offload.vlan_tci); } +#ifdef RTE_LIB_SECURITY + if (ol_flags & PKT_TX_SEC_OFFLOAD) { + union txgbe_crypto_tx_desc_md *md = + (union 
txgbe_crypto_tx_desc_md *)mdata; + tunnel_seed |= TXGBE_TXD_IPSEC_SAIDX(md->sa_idx); + type_tucmd_mlhl |= md->enc ? + (TXGBE_TXD_IPSEC_ESP | TXGBE_TXD_IPSEC_ESPENC) : 0; + type_tucmd_mlhl |= TXGBE_TXD_IPSEC_ESPLEN(md->pad_len); + tx_offload_mask.sa_idx |= ~0; + tx_offload_mask.sec_pad_len |= ~0; + } +#endif + txq->ctx_cache[ctx_idx].flags = ol_flags; txq->ctx_cache[ctx_idx].tx_offload.data[0] = tx_offload_mask.data[0] & tx_offload.data[0]; @@ -704,6 +722,9 @@ txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint32_t ctx = 0; uint32_t new_ctx; union txgbe_tx_offload tx_offload; +#ifdef RTE_LIB_SECURITY + uint8_t use_ipsec; +#endif tx_offload.data[0] = 0; tx_offload.data[1] = 0; @@ -730,6 +751,9 @@ txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, * are needed for offload functionality. */ ol_flags = tx_pkt->ol_flags; +#ifdef RTE_LIB_SECURITY + use_ipsec = txq->using_ipsec && (ol_flags & PKT_TX_SEC_OFFLOAD); +#endif /* If hardware offload required */ tx_ol_req = ol_flags & TXGBE_TX_OFFLOAD_MASK; @@ -745,6 +769,16 @@ txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, tx_offload.outer_l3_len = tx_pkt->outer_l3_len; tx_offload.outer_tun_len = txgbe_get_tun_len(tx_pkt); +#ifdef RTE_LIB_SECURITY + if (use_ipsec) { + union txgbe_crypto_tx_desc_md *ipsec_mdata = + (union txgbe_crypto_tx_desc_md *) + rte_security_dynfield(tx_pkt); + tx_offload.sa_idx = ipsec_mdata->sa_idx; + tx_offload.sec_pad_len = ipsec_mdata->pad_len; + } +#endif + /* If new context need be built or reuse the exist ctx*/ ctx = what_ctx_update(txq, tx_ol_req, tx_offload); /* Only allocate context descriptor if required */ @@ -898,7 +932,8 @@ txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, } txgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req, - tx_offload); + tx_offload, + rte_security_dynfield(tx_pkt)); txe->last_id = tx_last; tx_id = txe->next_id; @@ -917,6 +952,10 @@ txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, } olinfo_status |= TXGBE_TXD_PAYLEN(pkt_len); +#ifdef RTE_LIB_SECURITY + if (use_ipsec) + olinfo_status |= TXGBE_TXD_IPSEC; +#endif m_seg = tx_pkt; do { @@ -1101,6 +1140,14 @@ rx_desc_error_to_pkt_flags(uint32_t rx_status) pkt_flags |= PKT_RX_EIP_CKSUM_BAD; } +#ifdef RTE_LIB_SECURITY + if (rx_status & TXGBE_RXD_STAT_SECP) { + pkt_flags |= PKT_RX_SEC_OFFLOAD; + if (rx_status & TXGBE_RXD_ERR_SECERR) + pkt_flags |= PKT_RX_SEC_OFFLOAD_FAILED; + } +#endif + return pkt_flags; } @@ -1929,6 +1976,11 @@ txgbe_get_rx_port_offloads(struct rte_eth_dev *dev) offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM; +#ifdef RTE_LIB_SECURITY + if (dev->security_ctx) + offloads |= DEV_RX_OFFLOAD_SECURITY; +#endif + return offloads; } @@ -2030,6 +2082,9 @@ txgbe_dev_tx_done_cleanup(void *tx_queue, uint32_t free_cnt) { struct txgbe_tx_queue *txq = (struct txgbe_tx_queue *)tx_queue; if (txq->offloads == 0 && +#ifdef RTE_LIB_SECURITY + !(txq->using_ipsec) && +#endif txq->tx_free_thresh >= RTE_PMD_TXGBE_TX_MAX_BURST) return txgbe_tx_done_cleanup_simple(txq, free_cnt); @@ -2113,6 +2168,9 @@ txgbe_set_tx_function(struct rte_eth_dev *dev, struct txgbe_tx_queue *txq) { /* Use a simple Tx queue (no offloads, no multi segs) if possible */ if (txq->offloads == 0 && +#ifdef RTE_LIB_SECURITY + !(txq->using_ipsec) && +#endif txq->tx_free_thresh >= RTE_PMD_TXGBE_TX_MAX_BURST) { PMD_INIT_LOG(DEBUG, "Using simple tx code path"); dev->tx_pkt_burst = txgbe_xmit_pkts_simple; @@ -2167,6 +2225,10 @@ txgbe_get_tx_port_offloads(struct rte_eth_dev *dev) tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM; +#ifdef RTE_LIB_SECURITY + 
if (dev->security_ctx) + tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY; +#endif return tx_offload_capa; } @@ -2265,6 +2327,10 @@ txgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, txq->offloads = offloads; txq->ops = &def_txq_ops; txq->tx_deferred_start = tx_conf->tx_deferred_start; +#ifdef RTE_LIB_SECURITY + txq->using_ipsec = !!(dev->data->dev_conf.txmode.offloads & + DEV_TX_OFFLOAD_SECURITY); +#endif /* Modification to set tail pointer for virtual function * if vf is detected. @@ -4065,6 +4131,7 @@ txgbe_set_rsc(struct rte_eth_dev *dev) void __rte_cold txgbe_set_rx_function(struct rte_eth_dev *dev) { + uint16_t i; struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev); /* @@ -4125,6 +4192,15 @@ txgbe_set_rx_function(struct rte_eth_dev *dev) dev->rx_pkt_burst = txgbe_recv_pkts; } + +#ifdef RTE_LIB_SECURITY + for (i = 0; i < dev->data->nb_rx_queues; i++) { + struct txgbe_rx_queue *rxq = dev->data->rx_queues[i]; + + rxq->using_ipsec = !!(dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_SECURITY); + } +#endif } /* @@ -4395,6 +4471,19 @@ txgbe_dev_rxtx_start(struct rte_eth_dev *dev) dev->data->dev_conf.lpbk_mode) txgbe_setup_loopback_link_raptor(hw); +#ifdef RTE_LIB_SECURITY + if ((dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) || + (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_SECURITY)) { + ret = txgbe_crypto_enable_ipsec(dev); + if (ret != 0) { + PMD_DRV_LOG(ERR, + "txgbe_crypto_enable_ipsec fails with %d.", + ret); + return ret; + } + } +#endif + return 0; } diff --git a/drivers/net/txgbe/txgbe_rxtx.h b/drivers/net/txgbe/txgbe_rxtx.h index 6e0e86ce5..203bdeb88 100644 --- a/drivers/net/txgbe/txgbe_rxtx.h +++ b/drivers/net/txgbe/txgbe_rxtx.h @@ -293,6 +293,10 @@ struct txgbe_rx_queue { uint16_t rx_nb_avail; /**< nr of staged pkts ready to ret to app */ uint16_t rx_next_avail; /**< idx of next staged pkt to ret to app */ uint16_t rx_free_trigger; /**< triggers rx buffer allocation */ +#ifdef RTE_LIB_SECURITY + uint8_t using_ipsec; + /**< indicates that IPsec RX feature is in use */ +#endif uint16_t rx_free_thresh; /**< max free RX desc to hold. */ uint16_t queue_id; /**< RX queue index. */ uint16_t reg_idx; /**< RX queue register index. */ @@ -336,6 +340,11 @@ union txgbe_tx_offload { uint64_t outer_tun_len:8; /**< Outer TUN (Tunnel) Hdr Length. */ uint64_t outer_l2_len:8; /**< Outer L2 (MAC) Hdr Length. */ uint64_t outer_l3_len:16; /**< Outer L3 (IP) Hdr Length. */ +#ifdef RTE_LIB_SECURITY + /* inline ipsec related*/ + uint64_t sa_idx:8; /**< TX SA database entry index */ + uint64_t sec_pad_len:4; /**< padding length */ +#endif }; }; @@ -388,6 +397,10 @@ struct txgbe_tx_queue { struct txgbe_ctx_info ctx_cache[TXGBE_CTX_NUM]; const struct txgbe_txq_ops *ops; /**< txq ops */ uint8_t tx_deferred_start; /**< not in global dev start. 
*/ +#ifdef RTE_LIB_SECURITY + uint8_t using_ipsec; + /**< indicates that IPsec TX feature is in use */ +#endif }; struct txgbe_txq_ops { From patchwork Tue Nov 3 10:08:18 2020 X-Patchwork-Submitter: Jiawen Wu X-Patchwork-Id: 83557 X-Patchwork-Delegate: ferruh.yigit@amd.com From: Jiawen Wu To: dev@dpdk.org Cc: Jiawen Wu Date: Tue, 3 Nov 2020 18:08:18 +0800 Message-Id: <20201103100818.311881-38-jiawenwu@trustnetic.com> In-Reply-To: <20201103100818.311881-1-jiawenwu@trustnetic.com> References: <20201103100818.311881-1-jiawenwu@trustnetic.com> Subject: [dpdk-dev] [PATCH 37/37] net/txgbe: add security type in flow action Add security type in flow action. Signed-off-by: Jiawen Wu --- drivers/net/txgbe/txgbe_flow.c | 52 +++++++++++++++++++++++++++++++++ drivers/net/txgbe/txgbe_ipsec.c | 30 +++++++++++++++++++ drivers/net/txgbe/txgbe_ipsec.h | 3 ++ 3 files changed, 85 insertions(+) diff --git a/drivers/net/txgbe/txgbe_flow.c b/drivers/net/txgbe/txgbe_flow.c index b5f4073e2..5ca89d619 100644 --- a/drivers/net/txgbe/txgbe_flow.c +++ b/drivers/net/txgbe/txgbe_flow.c @@ -145,6 +145,9 @@ const struct rte_flow_action *next_no_void_action( * END * other members in mask and spec should set to 0x00. * item->last should be NULL. + * + * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY.
+ * */ static int cons_parse_ntuple_filter(const struct rte_flow_attr *attr, @@ -193,6 +196,43 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, memset(ð_null, 0, sizeof(struct rte_flow_item_eth)); memset(&vlan_null, 0, sizeof(struct rte_flow_item_vlan)); +#ifdef RTE_LIB_SECURITY + /** + * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY + */ + act = next_no_void_action(actions, NULL); + if (act->type == RTE_FLOW_ACTION_TYPE_SECURITY) { + const void *conf = act->conf; + /* check if the next not void item is END */ + act = next_no_void_action(actions, act); + if (act->type != RTE_FLOW_ACTION_TYPE_END) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } + + /* get the IP pattern*/ + item = next_no_void_pattern(pattern, NULL); + while (item->type != RTE_FLOW_ITEM_TYPE_IPV4 && + item->type != RTE_FLOW_ITEM_TYPE_IPV6) { + if (item->last || + item->type == RTE_FLOW_ITEM_TYPE_END) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "IP pattern missing."); + return -rte_errno; + } + item = next_no_void_pattern(pattern, item); + } + + filter->proto = IPPROTO_ESP; + return txgbe_crypto_add_ingress_sa_from_flow(conf, item->spec, + item->type == RTE_FLOW_ITEM_TYPE_IPV6); + } +#endif + /* the first not void item can be MAC or IPv4 */ item = next_no_void_pattern(pattern, NULL); @@ -563,6 +603,12 @@ txgbe_parse_ntuple_filter(struct rte_eth_dev *dev, if (ret) return ret; +#ifdef RTE_LIB_SECURITY + /* ESP flow not really a flow */ + if (filter->proto == IPPROTO_ESP) + return 0; +#endif + /* txgbe doesn't support tcp flags */ if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) { memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); @@ -2690,6 +2736,12 @@ txgbe_flow_create(struct rte_eth_dev *dev, ret = txgbe_parse_ntuple_filter(dev, attr, pattern, actions, &ntuple_filter, error); +#ifdef RTE_LIB_SECURITY + /* ESP flow not really a flow*/ + if (ntuple_filter.proto == IPPROTO_ESP) + return flow; +#endif + if (!ret) { ret = txgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE); if (!ret) { diff --git a/drivers/net/txgbe/txgbe_ipsec.c b/drivers/net/txgbe/txgbe_ipsec.c index f8c54f3d4..d0543ce0b 100644 --- a/drivers/net/txgbe/txgbe_ipsec.c +++ b/drivers/net/txgbe/txgbe_ipsec.c @@ -658,6 +658,36 @@ txgbe_crypto_enable_ipsec(struct rte_eth_dev *dev) return 0; } +int +txgbe_crypto_add_ingress_sa_from_flow(const void *sess, + const void *ip_spec, + uint8_t is_ipv6) +{ + struct txgbe_crypto_session *ic_session = + get_sec_session_private_data(sess); + + if (ic_session->op == TXGBE_OP_AUTHENTICATED_DECRYPTION) { + if (is_ipv6) { + const struct rte_flow_item_ipv6 *ipv6 = ip_spec; + ic_session->src_ip.type = IPv6; + ic_session->dst_ip.type = IPv6; + rte_memcpy(ic_session->src_ip.ipv6, + ipv6->hdr.src_addr, 16); + rte_memcpy(ic_session->dst_ip.ipv6, + ipv6->hdr.dst_addr, 16); + } else { + const struct rte_flow_item_ipv4 *ipv4 = ip_spec; + ic_session->src_ip.type = IPv4; + ic_session->dst_ip.type = IPv4; + ic_session->src_ip.ipv4 = ipv4->hdr.src_addr; + ic_session->dst_ip.ipv4 = ipv4->hdr.dst_addr; + } + return txgbe_crypto_add_sa(ic_session); + } + + return 0; +} + static struct rte_security_ops txgbe_security_ops = { .session_create = txgbe_crypto_create_session, .session_get_size = txgbe_crypto_session_get_size, diff --git a/drivers/net/txgbe/txgbe_ipsec.h b/drivers/net/txgbe/txgbe_ipsec.h index 54c27d8ce..1e5678437 100644 --- 
a/drivers/net/txgbe/txgbe_ipsec.h +++ b/drivers/net/txgbe/txgbe_ipsec.h @@ -91,5 +91,8 @@ struct txgbe_ipsec { int txgbe_ipsec_ctx_create(struct rte_eth_dev *dev); int txgbe_crypto_enable_ipsec(struct rte_eth_dev *dev); +int txgbe_crypto_add_ingress_sa_from_flow(const void *sess, + const void *ip_spec, + uint8_t is_ipv6); #endif /*TXGBE_IPSEC_H_*/
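
For reference, the SECURITY branch added to cons_parse_ntuple_filter() expects a single RTE_FLOW_ACTION_TYPE_SECURITY action whose conf pointer is the rte_security session, followed by END, plus an IPv4 or IPv6 item in the pattern that supplies the SA addresses passed to txgbe_crypto_add_ingress_sa_from_flow(). A minimal application-side sketch of such a rule is shown below; it is not part of the patch, install_esp_inbound_rule() and its parameters are illustrative names, and the exact item list the PMD tolerates may be stricter than shown.

#include <stdint.h>
#include <rte_byteorder.h>
#include <rte_flow.h>

/* Illustrative only: build an inbound rule with a SECURITY action whose
 * conf is the inline-crypto rte_security session, plus an IPv4 item that
 * carries the SA's source/destination addresses.
 */
static struct rte_flow *
install_esp_inbound_rule(uint16_t port_id, void *sec_sess,
			 rte_be32_t src_ip, rte_be32_t dst_ip)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ipv4_spec = {
		.hdr = { .src_addr = src_ip, .dst_addr = dst_ip },
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ipv4_spec },
		{ .type = RTE_FLOW_ITEM_TYPE_ESP },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		/* conf carries the security session created for this SA */
		{ .type = RTE_FLOW_ACTION_TYPE_SECURITY, .conf = sec_sess },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	return rte_flow_create(port_id, &attr, pattern, actions, &err);
}

Because the parser sets filter->proto to IPPROTO_ESP and returns early, such a rule programs the SA addresses through txgbe_crypto_add_ingress_sa_from_flow() instead of creating an ntuple filter entry.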
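
On the transmit side, the data-path changes earlier in this section only act when PKT_TX_SEC_OFFLOAD is set and the SA index and pad length are read from the mbuf's security dynfield. A hedged sketch of how an application would attach that metadata before calling rte_eth_tx_burst() follows; prepare_inline_ipsec_tx() is an illustrative helper, and "sess" is assumed to be an inline-crypto session created against this port's security context.

#include <errno.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_security.h>

/* Illustrative only: tag an outbound mbuf for inline IPsec so the PMD's
 * Tx path can pick up the per-SA metadata from the security dynfield.
 */
static int
prepare_inline_ipsec_tx(uint16_t port_id, struct rte_security_session *sess,
			struct rte_mbuf *m)
{
	struct rte_security_ctx *sec_ctx = rte_eth_dev_get_sec_ctx(port_id);

	if (sec_ctx == NULL)
		return -ENOTSUP;

	/* Let the PMD store its per-SA data (e.g. the SA index) in the mbuf. */
	if (rte_security_set_pkt_metadata(sec_ctx, sess, m, NULL) != 0)
		return -EIO;

	/* Request inline crypto processing for this packet on Tx. */
	m->ol_flags |= PKT_TX_SEC_OFFLOAD;

	return 0;
}

DEV_TX_OFFLOAD_SECURITY (and DEV_RX_OFFLOAD_SECURITY on the receive side) must also be enabled in the port configuration, since the queue-level using_ipsec flags above are derived from those offload bits.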