From patchwork Wed Jul  8 21:39:45 2020
X-Patchwork-Submitter: Andrey Vesnovaty
X-Patchwork-Id: 73560
X-Patchwork-Delegate: ferruh.yigit@amd.com
From: Andrey Vesnovaty
To: dev@dpdk.org
Cc: jer@marvell.com, jerinjacobk@gmail.com, thomas@monjalon.net,
 ferruh.yigit@intel.com, stephen@networkplumber.org,
 bruce.richardson@intel.com, orika@mellanox.com, viacheslavo@mellanox.com,
 andrey.vesnovaty@gmail.com, Marko Kovacevic, Radu Nicolau, Akhil Goyal,
 Tomasz Kantecki, Sunil Kumar Kori, Pavan Nikhilesh, John McNamara
Date: Thu, 9 Jul 2020 00:39:45 +0300
Message-Id: <20200708213946.30108-7-andreyv@mellanox.com>
X-Mailer: git-send-email 2.26.2
In-Reply-To: <20200708213946.30108-1-andreyv@mellanox.com>
References: <20200702120511.16315-1-andreyv@mellanox.com>
 <20200708213946.30108-1-andreyv@mellanox.com>
Subject: [dpdk-dev] [PATCH v2 6/6] examples/flow_filtering: utilize shared
 RSS action

This commit gives a first usage example of the shared RSS action and
demonstrates the shared action capability for in-place updates.

The application first creates a shared action during the initialization
phase. The flow object created later by the application then uses the
previously created shared RSS action, configured with a single queue,
instead of the queue action used in the original application. On each Rx
queue burst the shared RSS action is reconfigured via the
rte_flow_shared_action_update() API to switch queue 0 to 1 and queue 1
to 0. The user is expected to observe consistent queue switches on each
packet burst.

Signed-off-by: Andrey Vesnovaty
---
 doc/guides/sample_app_ug/flow_filtering.rst | 62 +++++++++++++++++----
 examples/flow_filtering/flow_blocks.c       | 30 +++++-----
 examples/flow_filtering/main.c              | 41 +++++++++++++-
 3 files changed, 105 insertions(+), 28 deletions(-)

diff --git a/doc/guides/sample_app_ug/flow_filtering.rst b/doc/guides/sample_app_ug/flow_filtering.rst
index 5e5a6cd8a0..cfe9334717 100644
--- a/doc/guides/sample_app_ug/flow_filtering.rst
+++ b/doc/guides/sample_app_ug/flow_filtering.rst
@@ -106,7 +106,7 @@ following code:
 .. code-block:: c

    /* create flow for send packet with */
-   flow = generate_ipv4_flow(port_id, selected_queue,
+   flow = generate_ipv4_flow(port_id, shared_action,
                SRC_IP, EMPTY_MASK,
                DEST_IP, FULL_MASK, &error);
    if (!flow) {
@@ -242,7 +242,7 @@ The Ethernet port is configured with default settings using the
    rxq_conf = dev_info.default_rxconf;
    rxq_conf.offloads = port_conf.rxmode.offloads;
 
-For this example we are configuring number of rx and tx queues that are connected
+For this example we are configuring 2 rx and 2 tx queues that are connected
 to a single port.
 
 .. code-block:: c
 
@@ -270,13 +270,22 @@ to a single port.
        }
    }
 
+Before we create the flow we create a shared action, in order to pass it in
+the actions argument when creating the flow. The action is a single-queue RSS
+action, similar to the queue action, with the only difference that a shared
+RSS action can be updated after it has been created.
+
+.. code-block:: c
+
+   shared_action = rte_flow_shared_action_create(port_id, &action, &error);
+
 In the next step we create and apply the flow rule. which is to send packets
 with destination ip equals to 192.168.1.1 to queue number 1. The detail
 explanation of the ``generate_ipv4_flow()`` appears later in this document:
 
 .. code-block:: c
 
-   flow = generate_ipv4_flow(port_id, selected_queue,
+   flow = generate_ipv4_flow(port_id, shared_action,
                SRC_IP, EMPTY_MASK,
                DEST_IP, FULL_MASK, &error);
 
@@ -339,6 +348,21 @@ looks like the following:
                        printf("\n");
                        rte_pktmbuf_free(m);
                }
+               if (rss_queue[0] == 0) {
+                       printf(">>> switching queue 0 -> 1\n");
+                       rss_queue[0] = 1;
+               } else {
+                       printf(">>> switching queue 1 -> 0\n");
+                       rss_queue[0] = 0;
+               }
+               ret = rte_flow_shared_action_update
+                               (port_id, shared_action, &action,
+                                &error);
+               if (ret)
+                       rte_exit(EXIT_FAILURE,
+                                ":: error: RSS action update "
+                                "failed: %s\n",
+                                rte_strerror(-ret));
        }
    }
 }
@@ -348,6 +372,8 @@ looks like the following:
 
    rte_eth_dev_close(port_id);
 }
 
+On each loop iteration the Rx queue is switched using the
+``rte_flow_shared_action_update()`` API.
 The main work of the application is reading the packets from all queues
 and printing for each packet the destination queue:
 
@@ -365,6 +391,21 @@ queues and printing for each packet the destination queue:
                        printf(" - queue=0x%x", (unsigned int)i);
                        printf("\n");
                        rte_pktmbuf_free(m);
+                       if (rss_queue[0] == 0) {
+                               printf(">>> switching queue 0 -> 1\n");
+                               rss_queue[0] = 1;
+                       } else {
+                               printf(">>> switching queue 1 -> 0\n");
+                               rss_queue[0] = 0;
+                       }
+                       ret = rte_flow_shared_action_update
+                                       (port_id, shared_action, &action,
+                                        &error);
+                       if (ret)
+                               rte_exit(EXIT_FAILURE,
+                                        ":: error: RSS action update "
+                                        "failed: %s\n",
+                                        rte_strerror(-ret));
                }
        }
 }
@@ -378,13 +419,15 @@ The forwarding loop can be interrupted and the application closed using
 
 The generate_ipv4_flow function
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
 The generate_ipv4_flow function is responsible for creating the flow rule.
 This function is located in the ``flow_blocks.c`` file.
 
 .. code-block:: c
 
    static struct rte_flow *
-   generate_ipv4_flow(uint8_t port_id, uint16_t rx_q,
+   generate_ipv4_flow(uint8_t port_id,
+                   struct rte_flow_shared_action *shared_action,
                uint32_t src_ip, uint32_t src_mask,
                uint32_t dest_ip, uint32_t dest_mask,
                struct rte_flow_error *error)
@@ -393,7 +436,6 @@ This function is located in the ``flow_blocks.c`` file.
    struct rte_flow_item pattern[MAX_PATTERN_NUM];
    struct rte_flow_action action[MAX_ACTION_NUM];
    struct rte_flow *flow = NULL;
-   struct rte_flow_action_queue queue = { .index = rx_q };
    struct rte_flow_item_ipv4 ip_spec;
    struct rte_flow_item_ipv4 ip_mask;
 
@@ -411,8 +453,8 @@ This function is located in the ``flow_blocks.c`` file.
     * create the action sequence.
     * one action only, move packet to queue
     */
-   action[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
-   action[0].conf = &queue;
+   action[0].type = RTE_FLOW_ACTION_TYPE_SHARED;
+   action[0].conf = shared_action;
    action[1].type = RTE_FLOW_ACTION_TYPE_END;
 
    /*
@@ -468,12 +510,12 @@ The following part create the flow attributes, in our case ingress.
    attr.ingress = 1;
 
 The third part defines the action to be taken when a packet matches
-the rule. In this case send the packet to queue.
+the rule. In this case send the packet to a single RSS queue.
 
 .. code-block:: c
 
-   action[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
-   action[0].conf = &queue;
+   action[0].type = RTE_FLOW_ACTION_TYPE_SHARED;
+   action[0].conf = shared_action;
    action[1].type = RTE_FLOW_ACTION_TYPE_END;
 
 The fourth part is responsible for creating the pattern and is built from
diff --git a/examples/flow_filtering/flow_blocks.c b/examples/flow_filtering/flow_blocks.c
index 575d792810..99bfed3172 100644
--- a/examples/flow_filtering/flow_blocks.c
+++ b/examples/flow_filtering/flow_blocks.c
@@ -6,11 +6,11 @@
 #define MAX_ACTION_NUM 2
 
 struct rte_flow *
-generate_ipv4_flow(uint16_t port_id, uint16_t rx_q,
-               uint32_t src_ip, uint32_t src_mask,
-               uint32_t dest_ip, uint32_t dest_mask,
-               struct rte_flow_error *error);
-
+generate_ipv4_flow(uint16_t port_id,
+               struct rte_flow_shared_action *shared_action,
+               uint32_t src_ip, uint32_t src_mask,
+               uint32_t dest_ip, uint32_t dest_mask,
+               struct rte_flow_error *error);
 
 /**
  * create a flow rule that sends packets with matching src and dest ip
@@ -18,8 +18,8 @@ generate_ipv4_flow(uint16_t port_id, uint16_t rx_q,
  *
  * @param port_id
  *   The selected port.
- * @param rx_q
- *   The selected target queue.
+ * @param shared_action
+ *   The shared RSS action with a single queue.
  * @param src_ip
  *   The src ip value to match the input packet.
  * @param src_mask
@@ -35,16 +35,16 @@ generate_ipv4_flow(uint16_t port_id, uint16_t rx_q,
  *   A flow if the rule could be created else return NULL.
  */
 struct rte_flow *
-generate_ipv4_flow(uint16_t port_id, uint16_t rx_q,
-               uint32_t src_ip, uint32_t src_mask,
-               uint32_t dest_ip, uint32_t dest_mask,
-               struct rte_flow_error *error)
+generate_ipv4_flow(uint16_t port_id,
+               struct rte_flow_shared_action *shared_action,
+               uint32_t src_ip, uint32_t src_mask,
+               uint32_t dest_ip, uint32_t dest_mask,
+               struct rte_flow_error *error)
 {
    struct rte_flow_attr attr;
    struct rte_flow_item pattern[MAX_PATTERN_NUM];
    struct rte_flow_action action[MAX_ACTION_NUM];
    struct rte_flow *flow = NULL;
-   struct rte_flow_action_queue queue = { .index = rx_q };
    struct rte_flow_item_ipv4 ip_spec;
    struct rte_flow_item_ipv4 ip_mask;
    int res;
 
@@ -61,10 +61,10 @@ generate_ipv4_flow(uint16_t port_id, uint16_t rx_q,
    /*
     * create the action sequence.
-    * one action only, move packet to queue
+    * one action only, move packet to shared RSS queue
     */
-   action[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
-   action[0].conf = &queue;
+   action[0].type = RTE_FLOW_ACTION_TYPE_SHARED;
+   action[0].conf = shared_action;
    action[1].type = RTE_FLOW_ACTION_TYPE_END;
 
    /*
diff --git a/examples/flow_filtering/main.c b/examples/flow_filtering/main.c
index cc9e7e7808..d6b18d95fc 100644
--- a/examples/flow_filtering/main.c
+++ b/examples/flow_filtering/main.c
@@ -32,8 +32,7 @@
 static volatile bool force_quit;
 
 static uint16_t port_id;
-static uint16_t nr_queues = 5;
-static uint8_t selected_queue = 1;
+static uint16_t nr_queues = 2;
 struct rte_mempool *mbuf_pool;
 struct rte_flow *flow;
 
@@ -42,6 +41,24 @@ struct rte_flow *flow;
 #define FULL_MASK 0xffffffff /* full mask */
 #define EMPTY_MASK 0x0 /* empty mask */
 
+struct rte_flow_shared_action *shared_action;
+uint16_t rss_queue[1] = {0};
+
+struct rte_flow_action_rss action_rss = {
+   .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
+   .level = 0,
+   .types = 0,
+   .key_len = 0,
+   .key = NULL,
+   .queue = rss_queue,
+   .queue_num = 1,
+};
+
+struct rte_flow_action action = {
+   .type = RTE_FLOW_ACTION_TYPE_RSS,
+   .conf = &action_rss,
+};
+
 #include "flow_blocks.c"
 
 static inline void
@@ -61,6 +78,7 @@ main_loop(void)
    uint16_t nb_rx;
    uint16_t i;
    uint16_t j;
+   int ret;
 
    while (!force_quit) {
        for (i = 0; i < nr_queues; i++) {
@@ -82,6 +100,21 @@ main_loop(void)
 
                rte_pktmbuf_free(m);
            }
+           if (rss_queue[0] == 0) {
+               printf(">>> switching queue 0 -> 1\n");
+               rss_queue[0] = 1;
+           } else {
+               printf(">>> switching queue 1 -> 0\n");
+               rss_queue[0] = 0;
+           }
+           ret = rte_flow_shared_action_update
+                   (port_id, shared_action, &action,
+                    &error);
+           if (ret)
+               rte_exit(EXIT_FAILURE,
+                    ":: error: RSS action update "
+                    "failed: %s\n",
+                    rte_strerror(-ret));
        }
    }
 }
@@ -243,8 +276,10 @@ main(int argc, char **argv)
 
    init_port();
 
+   shared_action = rte_flow_shared_action_create(port_id, &action, &error);
+
    /* create flow for send packet with */
-   flow = generate_ipv4_flow(port_id, selected_queue,
+   flow = generate_ipv4_flow(port_id, shared_action,
                SRC_IP, EMPTY_MASK,
                DEST_IP, FULL_MASK, &error);
    if (!flow) {
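
For reference, a minimal standalone sketch of the create-once / update-per-burst
pattern this patch demonstrates, assuming the rte_flow_shared_action_create()
and rte_flow_shared_action_update() signatures exactly as exercised above (the
API proposed in this series, not necessarily a released DPDK version). The
helper names setup_shared_rss() and switch_rss_queue() are illustrative only
and do not appear in the patch:

#include <stdint.h>
#include <rte_flow.h>

/* Single-queue RSS config; rewriting rss_queue[0] and calling update
 * switches the destination queue of every flow that references the
 * shared action, without recreating the flow. */
static uint16_t rss_queue[1] = {0};
static struct rte_flow_action_rss action_rss = {
	.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
	.queue = rss_queue,
	.queue_num = 1,
};
static struct rte_flow_action action = {
	.type = RTE_FLOW_ACTION_TYPE_RSS,
	.conf = &action_rss,
};

/* Created once at init; flows then reference it through an
 * RTE_FLOW_ACTION_TYPE_SHARED action entry. */
static struct rte_flow_shared_action *
setup_shared_rss(uint16_t port_id, struct rte_flow_error *error)
{
	return rte_flow_shared_action_create(port_id, &action, error);
}

/* Flip the RSS destination between queue 0 and queue 1 in place. */
static int
switch_rss_queue(uint16_t port_id, struct rte_flow_shared_action *sa,
		 struct rte_flow_error *error)
{
	rss_queue[0] = rss_queue[0] ? 0 : 1;
	return rte_flow_shared_action_update(port_id, sa, &action, error);
}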