@@ -68,21 +68,18 @@ EAL Initialization and cmdline Start
The first task is the initialization of the Environment Abstraction Layer (EAL).
This is achieved as follows:
-.. code-block:: c
-
- int main(int argc, char **argv)
- {
- ret = rte_eal_init(argc, argv);
- if (ret < 0)
- rte_panic("Cannot init EAL\n");
+.. literalinclude:: ../../../examples/cmdline/main.c
+ :language: c
+ :start-after: Initialization of the Environment Abstraction Layer (EAL). 8<
+ :end-before: >8 End of initialization of Environment Abstraction Layer (EAL).
Then, a new command line object is created and started to interact with the user through the console:
-.. code-block:: c
-
- cl = cmdline_stdin_new(main_ctx, "example> ");
- cmdline_interact(cl);
- cmdline_stdin_exit(cl);
+.. literalinclude:: ../../../examples/cmdline/main.c
+ :language: c
+ :start-after: Creating a new command line object. 8<
+ :end-before: >8 End of creating a new command line object.
+ :dedent: 1
The cmdline_interact() function returns when the user types **Ctrl-d** and in this case,
the application exits.
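+Besides **Ctrl-d**, an application can also end the interactive loop from within
+a command callback by calling cmdline_quit(). A minimal sketch of such a
+callback (it is not part of the example sources) could look as follows:
+
+.. code-block:: c
+
+    /* Hypothetical "quit" callback; not taken from examples/cmdline. */
+    static void cmd_quit_parsed(__rte_unused void *parsed_result,
+                                struct cmdline *cl, __rte_unused void *data)
+    {
+        /* Make cmdline_interact() return, just like Ctrl-d does. */
+        cmdline_quit(cl);
+    }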
@@ -92,14 +89,10 @@ Defining a cmdline Context
A cmdline context is a list of commands that are listed in a NULL-terminated table, for example:
-.. code-block:: c
-
- cmdline_parse_ctx_t main_ctx[] = {
- (cmdline_parse_inst_t *) &cmd_obj_del_show,
- (cmdline_parse_inst_t *) &cmd_obj_add,
- (cmdline_parse_inst_t *) &cmd_help,
- NULL,
- };
+.. literalinclude:: ../../../examples/cmdline/commands.c
+ :language: c
+ :start-after: Cmdline context list of commands in NULL-terminated table. 8<
+ :end-before: >8 End of context list.
Each command (of type cmdline_parse_inst_t) is defined statically.
It contains a pointer to a callback function that is executed when the command is parsed,
@@ -120,33 +113,10 @@ in the parse_obj_list.c and parse_obj_list.h files.
For example, the cmd_obj_del_show command is defined as shown below:
-.. code-block:: c
-
- struct cmd_obj_add_result {
- cmdline_fixed_string_t action;
- cmdline_fixed_string_t name;
- struct object *obj;
- };
-
- static void cmd_obj_del_show_parsed(void *parsed_result, struct cmdline *cl, __rte_unused void *data)
- {
- /* ... */
- }
-
- cmdline_parse_token_string_t cmd_obj_action = TOKEN_STRING_INITIALIZER(struct cmd_obj_del_show_result, action, "show#del");
-
- parse_token_obj_list_t cmd_obj_obj = TOKEN_OBJ_LIST_INITIALIZER(struct cmd_obj_del_show_result, obj, &global_obj_list);
-
- cmdline_parse_inst_t cmd_obj_del_show = {
- .f = cmd_obj_del_show_parsed, /* function to call */
- .data = NULL, /* 2nd arg of func */
- .help_str = "Show/del an object",
- .tokens = { /* token list, NULL terminated */
- (void *)&cmd_obj_action,
- (void *)&cmd_obj_obj,
- NULL,
- },
- };
+.. literalinclude:: ../../../examples/cmdline/commands.c
+ :language: c
+ :start-after: Show or delete tokens. 8<
+ :end-before: >8 End of show or delete tokens.
This command is composed of two tokens:
@@ -64,81 +64,12 @@ ACL field definitions for the IPv4 5 tuple rule
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The following field definitions are used when creating the ACL table during
-initialisation of the ``Flow Classify`` application..
+initialisation of the ``Flow Classify`` application.
-.. code-block:: c
-
- enum {
- PROTO_FIELD_IPV4,
- SRC_FIELD_IPV4,
- DST_FIELD_IPV4,
- SRCP_FIELD_IPV4,
- DSTP_FIELD_IPV4,
- NUM_FIELDS_IPV4
- };
-
- enum {
- PROTO_INPUT_IPV4,
- SRC_INPUT_IPV4,
- DST_INPUT_IPV4,
- SRCP_DESTP_INPUT_IPV4
- };
-
- static struct rte_acl_field_def ipv4_defs[NUM_FIELDS_IPV4] = {
- /* first input field - always one byte long. */
- {
- .type = RTE_ACL_FIELD_TYPE_BITMASK,
- .size = sizeof(uint8_t),
- .field_index = PROTO_FIELD_IPV4,
- .input_index = PROTO_INPUT_IPV4,
- .offset = sizeof(struct rte_ether_hdr) +
- offsetof(struct rte_ipv4_hdr, next_proto_id),
- },
- /* next input field (IPv4 source address) - 4 consecutive bytes. */
- {
- /* rte_flow uses a bit mask for IPv4 addresses */
- .type = RTE_ACL_FIELD_TYPE_BITMASK,
- .size = sizeof(uint32_t),
- .field_index = SRC_FIELD_IPV4,
- .input_index = SRC_INPUT_IPV4,
- .offset = sizeof(struct rte_ether_hdr) +
- offsetof(struct rte_ipv4_hdr, src_addr),
- },
- /* next input field (IPv4 destination address) - 4 consecutive bytes. */
- {
- /* rte_flow uses a bit mask for IPv4 addresses */
- .type = RTE_ACL_FIELD_TYPE_BITMASK,
- .size = sizeof(uint32_t),
- .field_index = DST_FIELD_IPV4,
- .input_index = DST_INPUT_IPV4,
- .offset = sizeof(struct rte_ether_hdr) +
- offsetof(struct rte_ipv4_hdr, dst_addr),
- },
- /*
- * Next 2 fields (src & dst ports) form 4 consecutive bytes.
- * They share the same input index.
- */
- {
- /* rte_flow uses a bit mask for protocol ports */
- .type = RTE_ACL_FIELD_TYPE_BITMASK,
- .size = sizeof(uint16_t),
- .field_index = SRCP_FIELD_IPV4,
- .input_index = SRCP_DESTP_INPUT_IPV4,
- .offset = sizeof(struct rte_ether_hdr) +
- sizeof(struct rte_ipv4_hdr) +
- offsetof(struct rte_tcp_hdr, src_port),
- },
- {
- /* rte_flow uses a bit mask for protocol ports */
- .type = RTE_ACL_FIELD_TYPE_BITMASK,
- .size = sizeof(uint16_t),
- .field_index = DSTP_FIELD_IPV4,
- .input_index = SRCP_DESTP_INPUT_IPV4,
- .offset = sizeof(struct rte_ether_hdr) +
- sizeof(struct rte_ipv4_hdr) +
- offsetof(struct rte_tcp_hdr, dst_port),
- },
- };
+.. literalinclude:: ../../../examples/flow_classify/flow_classify.c
+ :language: c
+ :start-after: Creation of ACL table during initialization of application. 8<
+ :end-before: >8 End of creation of ACL table.
The Main Function
~~~~~~~~~~~~~~~~~
@@ -150,34 +81,28 @@ The first task is to initialize the Environment Abstraction Layer (EAL).
The ``argc`` and ``argv`` arguments are provided to the ``rte_eal_init()``
function. The value returned is the number of parsed arguments:
-.. code-block:: c
-
- int ret = rte_eal_init(argc, argv);
- if (ret < 0)
- rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
+.. literalinclude:: ../../../examples/flow_classify/flow_classify.c
+ :language: c
+ :start-after: Initialize the Environment Abstraction Layer (EAL). 8<
+ :end-before: >8 End of initialization of EAL.
+ :dedent: 1
It then parses the flow_classify application arguments:
-.. code-block:: c
-
- ret = parse_args(argc, argv);
- if (ret < 0)
- rte_exit(EXIT_FAILURE, "Invalid flow_classify parameters\n");
+.. literalinclude:: ../../../examples/flow_classify/flow_classify.c
+ :language: c
+ :start-after: Parse application arguments (after the EAL ones). 8<
+ :end-before: >8 End of parse application arguments.
+ :dedent: 1
The ``main()`` function also allocates a mempool to hold the mbufs
(Message Buffers) used by the application:
-.. code-block:: c
-
- mbuf_pool = rte_mempool_create("MBUF_POOL",
- NUM_MBUFS * nb_ports,
- MBUF_SIZE,
- MBUF_CACHE_SIZE,
- sizeof(struct rte_pktmbuf_pool_private),
- rte_pktmbuf_pool_init, NULL,
- rte_pktmbuf_init, NULL,
- rte_socket_id(),
- 0);
+.. literalinclude:: ../../../examples/flow_classify/flow_classify.c
+ :language: c
+ :start-after: Creates a new mempool in memory to hold the mbufs. 8<
+ :end-before: >8 End of creation of new mempool in memory.
+ :dedent: 1
Mbufs are the packet buffer structure used by DPDK. They are explained in
detail in the "Mbuf Library" section of the *DPDK Programmer's Guide*.
@@ -185,72 +110,35 @@ detail in the "Mbuf Library" section of the *DPDK Programmer's Guide*.
The ``main()`` function also initializes all the ports using the user defined
``port_init()`` function which is explained in the next section:
-.. code-block:: c
-
- RTE_ETH_FOREACH_DEV(portid) {
- if (port_init(portid, mbuf_pool) != 0) {
- rte_exit(EXIT_FAILURE,
- "Cannot init port %" PRIu8 "\n", portid);
- }
- }
+.. literalinclude:: ../../../examples/flow_classify/flow_classify.c
+ :language: c
+ :start-after: Initialize all ports. 8<
+ :end-before: >8 End of initialization of all ports.
+ :dedent: 1
The ``main()`` function creates the ``flow classifier object`` and adds an ``ACL
table`` to the flow classifier.
-.. code-block:: c
+.. literalinclude:: ../../../examples/flow_classify/flow_classify.c
+ :language: c
+ :start-after: Creation of flow classifier object. 8<
+ :end-before: >8 End of creation of flow classifier object.
- struct flow_classifier {
- struct rte_flow_classifier *cls;
- };
-
- struct flow_classifier_acl {
- struct flow_classifier cls;
- } __rte_cache_aligned;
-
- /* Memory allocation */
- size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct flow_classifier_acl));
- cls_app = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
- if (cls_app == NULL)
- rte_exit(EXIT_FAILURE, "Cannot allocate classifier memory\n");
-
- cls_params.name = "flow_classifier";
- cls_params.socket_id = socket_id;
-
- cls_app->cls = rte_flow_classifier_create(&cls_params);
- if (cls_app->cls == NULL) {
- rte_free(cls_app);
- rte_exit(EXIT_FAILURE, "Cannot create classifier\n");
- }
-
- /* initialise ACL table params */
- table_acl_params.name = "table_acl_ipv4_5tuple";
- table_acl_params.n_rule_fields = RTE_DIM(ipv4_defs);
- table_acl_params.n_rules = FLOW_CLASSIFY_MAX_RULE_NUM;
- memcpy(table_acl_params.field_format, ipv4_defs, sizeof(ipv4_defs));
-
- /* initialise table create params */
- cls_table_params.ops = &rte_table_acl_ops,
- cls_table_params.arg_create = &table_acl_params,
- cls_table_params.type = RTE_FLOW_CLASSIFY_TABLE_ACL_IP4_5TUPLE;
-
- ret = rte_flow_classify_table_create(cls_app->cls, &cls_table_params);
- if (ret) {
- rte_flow_classifier_free(cls_app->cls);
- rte_free(cls);
- rte_exit(EXIT_FAILURE, "Failed to create classifier table\n");
- }
+.. literalinclude:: ../../../examples/flow_classify/flow_classify.c
+ :language: c
+ :start-after: Memory allocation. 8<
+ :end-before: >8 End of initialization of table create params.
+ :dedent: 1
It then reads the ipv4_rules_file.txt file and initialises the parameters for
the ``rte_flow_classify_table_entry_add`` API.
This API adds a rule to the ACL table.
-.. code-block:: c
-
- if (add_rules(parm_config.rule_ipv4_name)) {
- rte_flow_classifier_free(cls_app->cls);
- rte_free(cls_app);
- rte_exit(EXIT_FAILURE, "Failed to add rules\n");
- }
+.. literalinclude:: ../../../examples/flow_classify/flow_classify.c
+ :language: c
+ :start-after: Read file of IPv4 tuple rules. 8<
+ :end-before: >8 End of reading file of IPv4 5 tuple rules.
+ :dedent: 1
Once the initialization is complete, the application is ready to launch a
function on an lcore. In this example ``lcore_main()`` is called on a single
@@ -268,79 +156,29 @@ The Port Initialization Function
The main functional part of the port initialization used in the Flow
Classify application is shown below:
-.. code-block:: c
-
- static inline int
- port_init(uint16_t port, struct rte_mempool *mbuf_pool)
- {
- struct rte_eth_conf port_conf = port_conf_default;
- const uint16_t rx_rings = 1, tx_rings = 1;
- struct rte_ether_addr addr;
- int retval;
- uint16_t q;
-
- /* Configure the Ethernet device. */
- retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
- if (retval != 0)
- return retval;
-
- /* Allocate and set up 1 RX queue per Ethernet port. */
- for (q = 0; q < rx_rings; q++) {
- retval = rte_eth_rx_queue_setup(port, q, RX_RING_SIZE,
- rte_eth_dev_socket_id(port), NULL, mbuf_pool);
- if (retval < 0)
- return retval;
- }
-
- /* Allocate and set up 1 TX queue per Ethernet port. */
- for (q = 0; q < tx_rings; q++) {
- retval = rte_eth_tx_queue_setup(port, q, TX_RING_SIZE,
- rte_eth_dev_socket_id(port), NULL);
- if (retval < 0)
- return retval;
- }
-
- /* Start the Ethernet port. */
- retval = rte_eth_dev_start(port);
- if (retval < 0)
- return retval;
-
- /* Display the port MAC address. */
- retval = rte_eth_macaddr_get(port, &addr);
- if (retval < 0)
- return retval;
- printf("Port %u MAC: %02" PRIx8 " %02" PRIx8 " %02" PRIx8
- " %02" PRIx8 " %02" PRIx8 " %02" PRIx8 "\n",
- port,
- addr.addr_bytes[0], addr.addr_bytes[1],
- addr.addr_bytes[2], addr.addr_bytes[3],
- addr.addr_bytes[4], addr.addr_bytes[5]);
-
- /* Enable RX in promiscuous mode for the Ethernet device. */
- retval = rte_eth_promiscuous_enable(port);
- if (retval != 0)
- return retval;
-
- return 0;
- }
+.. literalinclude:: ../../../examples/flow_classify/flow_classify.c
+ :language: c
+ :start-after: Initializing port using global settings. 8<
+ :end-before: >8 End of initializing a given port.
The Ethernet ports are configured with default settings using the
``rte_eth_dev_configure()`` function and the ``port_conf_default`` struct.
-.. code-block:: c
-
- static const struct rte_eth_conf port_conf_default = {
- .rxmode = { .max_rx_pkt_len = RTE_ETHER_MAX_LEN }
- };
+.. literalinclude:: ../../../examples/flow_classify/flow_classify.c
+ :language: c
+ :start-after: Ethernet ports configured with default settings using struct. 8<
+ :end-before: >8 End of configuration of Ethernet ports.
For this example the ports are set up with 1 RX and 1 TX queue using the
``rte_eth_rx_queue_setup()`` and ``rte_eth_tx_queue_setup()`` functions.
The Ethernet port is then started:
-.. code-block:: c
-
- retval = rte_eth_dev_start(port);
+.. literalinclude:: ../../../examples/flow_classify/flow_classify.c
+ :language: c
+ :start-after: Start the Ethernet port. 8<
+ :end-before: >8 End of starting the Ethernet port.
+ :dedent: 1
Finally the RX port is set in promiscuous mode:
@@ -356,51 +194,10 @@ The ``add_rules`` function reads the ``ipv4_rules_file.txt`` file and calls the
``add_classify_rule`` function which calls the
``rte_flow_classify_table_entry_add`` API.
-.. code-block:: c
-
- static int
- add_rules(const char *rule_path)
- {
- FILE *fh;
- char buff[LINE_MAX];
- unsigned int i = 0;
- unsigned int total_num = 0;
- struct rte_eth_ntuple_filter ntuple_filter;
-
- fh = fopen(rule_path, "rb");
- if (fh == NULL)
- rte_exit(EXIT_FAILURE, "%s: Open %s failed\n", __func__,
- rule_path);
-
- fseek(fh, 0, SEEK_SET);
-
- i = 0;
- while (fgets(buff, LINE_MAX, fh) != NULL) {
- i++;
-
- if (is_bypass_line(buff))
- continue;
-
- if (total_num >= FLOW_CLASSIFY_MAX_RULE_NUM - 1) {
- printf("\nINFO: classify rule capacity %d reached\n",
- total_num);
- break;
- }
-
- if (parse_ipv4_5tuple_rule(buff, &ntuple_filter) != 0)
- rte_exit(EXIT_FAILURE,
- "%s Line %u: parse rules error\n",
- rule_path, i);
-
- if (add_classify_rule(&ntuple_filter) != 0)
- rte_exit(EXIT_FAILURE, "add rule error\n");
-
- total_num++;
- }
-
- fclose(fh);
- return 0;
- }
+.. literalinclude:: ../../../examples/flow_classify/flow_classify.c
+ :language: c
+ :start-after: Reads file and calls the add_classify_rule function. 8<
+ :end-before: >8 End of add_rules.
The Lcore Main function
@@ -412,117 +209,23 @@ The ``lcore_main`` function calls the ``rte_flow_classifier_query`` API.
For the Flow Classify application the ``lcore_main`` function looks like the
following:
-.. code-block:: c
+.. literalinclude:: ../../../examples/flow_classify/flow_classify.c
+ :language: c
+ :start-after: Flow classify data. 8<
+ :end-before: >8 End of flow classify data.
- /* flow classify data */
- static int num_classify_rules;
- static struct rte_flow_classify_rule *rules[MAX_NUM_CLASSIFY];
- static struct rte_flow_classify_ipv4_5tuple_stats ntuple_stats;
- static struct rte_flow_classify_stats classify_stats = {
- .stats = (void *)&ntuple_stats
- };
-
- static __rte_noreturn void
- lcore_main(cls_app)
- {
- uint16_t port;
-
- /*
- * Check that the port is on the same NUMA node as the polling thread
- * for best performance.
- */
- RTE_ETH_FOREACH_DEV(port)
- if (rte_eth_dev_socket_id(port) > 0 &&
- rte_eth_dev_socket_id(port) != (int)rte_socket_id()) {
- printf("\n\n");
- printf("WARNING: port %u is on remote NUMA node\n",
- port);
- printf("to polling thread.\n");
- printf("Performance will not be optimal.\n");
-
- printf("\nCore %u forwarding packets. \n",
- rte_lcore_id());
- printf("[Ctrl+C to quit]\n
- }
-
- /* Run until the application is quit or killed. */
- for (;;) {
- /*
- * Receive packets on a port and forward them on the paired
- * port. The mapping is 0 -> 1, 1 -> 0, 2 -> 3, 3 -> 2, etc.
- */
- RTE_ETH_FOREACH_DEV(port) {
-
- /* Get burst of RX packets, from first port of pair. */
- struct rte_mbuf *bufs[BURST_SIZE];
- const uint16_t nb_rx = rte_eth_rx_burst(port, 0,
- bufs, BURST_SIZE);
-
- if (unlikely(nb_rx == 0))
- continue;
-
- for (i = 0; i < MAX_NUM_CLASSIFY; i++) {
- if (rules[i]) {
- ret = rte_flow_classifier_query(
- cls_app->cls,
- bufs, nb_rx, rules[i],
- &classify_stats);
- if (ret)
- printf(
- "rule [%d] query failed ret [%d]\n\n",
- i, ret);
- else {
- printf(
- "rule[%d] count=%"PRIu64"\n",
- i, ntuple_stats.counter1);
-
- printf("proto = %d\n",
- ntuple_stats.ipv4_5tuple.proto);
- }
- }
- }
-
- /* Send burst of TX packets, to second port of pair. */
- const uint16_t nb_tx = rte_eth_tx_burst(port ^ 1, 0,
- bufs, nb_rx);
-
- /* Free any unsent packets. */
- if (unlikely(nb_tx < nb_rx)) {
- uint16_t buf;
- for (buf = nb_tx; buf < nb_rx; buf++)
- rte_pktmbuf_free(bufs[buf]);
- }
- }
- }
- }
+.. literalinclude:: ../../../examples/flow_classify/flow_classify.c
+ :language: c
+ :start-after: Classifying the packets. 8<
+ :end-before: >8 End of lcore main.
The main work of the application is done within the loop:
-.. code-block:: c
-
- for (;;) {
- RTE_ETH_FOREACH_DEV(port) {
-
- /* Get burst of RX packets, from first port of pair. */
- struct rte_mbuf *bufs[BURST_SIZE];
- const uint16_t nb_rx = rte_eth_rx_burst(port, 0,
- bufs, BURST_SIZE);
-
- if (unlikely(nb_rx == 0))
- continue;
-
- /* Send burst of TX packets, to second port of pair. */
- const uint16_t nb_tx = rte_eth_tx_burst(port ^ 1, 0,
- bufs, nb_rx);
-
- /* Free any unsent packets. */
- if (unlikely(nb_tx < nb_rx)) {
- uint16_t buf;
- for (buf = nb_tx; buf < nb_rx; buf++)
- rte_pktmbuf_free(bufs[buf]);
- }
- }
- }
+.. literalinclude:: ../../../examples/flow_classify/flow_classify.c
+ :language: c
+ :start-after: Run until the application is quit or killed. 8<
+ :end-before: >8 End of main loop.
+ :dedent: 1
Packets are received in bursts on the RX ports and transmitted in bursts on
the TX ports. The ports are grouped in pairs with a simple mapping scheme
@@ -53,21 +53,21 @@ The first task is to initialize the Environment Abstraction Layer (EAL). The
``argc`` and ``argv`` arguments are provided to the ``rte_eal_init()``
function. The value returned is the number of parsed arguments:
-.. code-block:: c
-
- int ret = rte_eal_init(argc, argv);
- if (ret < 0)
- rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
+.. literalinclude:: ../../../examples/flow_filtering/main.c
+ :language: c
+ :start-after: Initialize EAL. 8<
+ :end-before: >8 End of Initialization of EAL.
+ :dedent: 1
The ``main()`` function also allocates a mempool to hold the mbufs (Message Buffers)
used by the application:
-.. code-block:: c
-
- mbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", 4096, 128, 0,
- RTE_MBUF_DEFAULT_BUF_SIZE,
- rte_socket_id());
+.. literalinclude:: ../../../examples/flow_filtering/main.c
+ :language: c
+ :start-after: Allocates a mempool to hold the mbufs. 8<
+ :end-before: >8 End of allocating a mempool to hold the mbufs.
+ :dedent: 1
Mbufs are the packet buffer structure used by DPDK. They are explained in
detail in the "Mbuf Library" section of the *DPDK Programmer's Guide*.
@@ -75,33 +75,30 @@ detail in the "Mbuf Library" section of the *DPDK Programmer's Guide*.
The ``main()`` function also initializes all the ports using the user defined
``init_port()`` function which is explained in the next section:
-.. code-block:: c
-
- init_port();
+.. literalinclude:: ../../../examples/flow_filtering/main.c
+ :language: c
+ :start-after: Initializes all the ports using the user defined init_port(). 8<
+ :end-before: >8 End of Initializing the ports using user defined init_port().
+ :dedent: 1
Once the initialization is complete, we set the flow rule using the
following code:
-.. code-block:: c
-
- /* create flow for send packet with */
- flow = generate_ipv4_flow(port_id, selected_queue,
- SRC_IP, EMPTY_MASK,
- DEST_IP, FULL_MASK, &error);
- if (!flow) {
- printf("Flow can't be created %d message: %s\n",
- error.type,
- error.message ? error.message : "(no stated reason)");
- rte_exit(EXIT_FAILURE, "error in creating flow");
- }
+.. literalinclude:: ../../../examples/flow_filtering/main.c
+ :language: c
+ :start-after: Create flow for send packet with. 8<
+ :end-before: >8 End of creating flow for send packet with.
+ :dedent: 1
In the last part the application is ready to launch the
``main_loop()`` function, which is explained below.
-.. code-block:: c
-
- main_loop();
+.. literalinclude:: ../../../examples/flow_filtering/main.c
+ :language: c
+ :start-after: Launching main_loop(). 8<
+ :end-before: >8 End of launching main_loop().
+ :dedent: 1
The Port Initialization Function
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -109,176 +106,54 @@ The Port Initialization Function
The main functional part of the port initialization used in the flow filtering
application is shown below:
-.. code-block:: c
-
- init_port(void)
- {
- int ret;
- uint16_t i;
- struct rte_eth_conf port_conf = {
- .rxmode = {
- .split_hdr_size = 0,
- },
- .txmode = {
- .offloads =
- DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO,
- },
- };
- struct rte_eth_txconf txq_conf;
- struct rte_eth_rxconf rxq_conf;
- struct rte_eth_dev_info dev_info;
-
- printf(":: initializing port: %d\n", port_id);
- ret = rte_eth_dev_configure(port_id,
- nr_queues, nr_queues, &port_conf);
- if (ret < 0) {
- rte_exit(EXIT_FAILURE,
- ":: cannot configure device: err=%d, port=%u\n",
- ret, port_id);
- }
-
- rte_eth_dev_info_get(port_id, &dev_info);
- rxq_conf = dev_info.default_rxconf;
- rxq_conf.offloads = port_conf.rxmode.offloads;
- /* only set Rx queues: something we care only so far */
- for (i = 0; i < nr_queues; i++) {
- ret = rte_eth_rx_queue_setup(port_id, i, 512,
- rte_eth_dev_socket_id(port_id),
- &rxq_conf,
- mbuf_pool);
- if (ret < 0) {
- rte_exit(EXIT_FAILURE,
- ":: Rx queue setup failed: err=%d, port=%u\n",
- ret, port_id);
- }
- }
-
- txq_conf = dev_info.default_txconf;
- txq_conf.offloads = port_conf.txmode.offloads;
-
- for (i = 0; i < nr_queues; i++) {
- ret = rte_eth_tx_queue_setup(port_id, i, 512,
- rte_eth_dev_socket_id(port_id),
- &txq_conf);
- if (ret < 0) {
- rte_exit(EXIT_FAILURE,
- ":: Tx queue setup failed: err=%d, port=%u\n",
- ret, port_id);
- }
- }
-
- ret = rte_eth_promiscuous_enable(port_id);
- if (ret != 0) {
- rte_exit(EXIT_FAILURE,
- ":: cannot enable promiscuous mode: err=%d, port=%u\n",
- ret, port_id);
- }
-
- ret = rte_eth_dev_start(port_id);
- if (ret < 0) {
- rte_exit(EXIT_FAILURE,
- "rte_eth_dev_start:err=%d, port=%u\n",
- ret, port_id);
- }
-
- assert_link_status();
-
- printf(":: initializing port: %d done\n", port_id);
- }
+.. literalinclude:: ../../../examples/flow_filtering/main.c
+ :language: c
+ :start-after: Port initialization used in flow filtering. 8<
+ :end-before: >8 End of Port initialization used in flow filtering.
The Ethernet port is configured with default settings using the
``rte_eth_dev_configure()`` function and the ``port_conf`` struct:
-.. code-block:: c
-
- struct rte_eth_conf port_conf = {
- .rxmode = {
- .split_hdr_size = 0,
- },
- .txmode = {
- .offloads =
- DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO,
- },
- };
-
- ret = rte_eth_dev_configure(port_id, nr_queues, nr_queues, &port_conf);
- if (ret < 0) {
- rte_exit(EXIT_FAILURE,
- ":: cannot configure device: err=%d, port=%u\n",
- ret, port_id);
- }
- rte_eth_dev_info_get(port_id, &dev_info);
- rxq_conf = dev_info.default_rxconf;
- rxq_conf.offloads = port_conf.rxmode.offloads;
+.. literalinclude:: ../../../examples/flow_filtering/main.c
+ :language: c
+ :start-after: Ethernet port configured with default settings. 8<
+ :end-before: >8 End of ethernet port configured with default settings.
+ :dedent: 1
For this example we are configuring the number of Rx and Tx queues that are connected
to a single port.
-.. code-block:: c
-
- for (i = 0; i < nr_queues; i++) {
- ret = rte_eth_rx_queue_setup(port_id, i, 512,
- rte_eth_dev_socket_id(port_id),
- &rxq_conf,
- mbuf_pool);
- if (ret < 0) {
- rte_exit(EXIT_FAILURE,
- ":: Rx queue setup failed: err=%d, port=%u\n",
- ret, port_id);
- }
- }
-
- for (i = 0; i < nr_queues; i++) {
- ret = rte_eth_tx_queue_setup(port_id, i, 512,
- rte_eth_dev_socket_id(port_id),
- &txq_conf);
- if (ret < 0) {
- rte_exit(EXIT_FAILURE,
- ":: Tx queue setup failed: err=%d, port=%u\n",
- ret, port_id);
- }
- }
+.. literalinclude:: ../../../examples/flow_filtering/main.c
+ :language: c
+ :start-after: Configuring number of RX and TX queues connected to single port. 8<
+ :end-before: >8 End of Configuring RX and TX queues connected to single port.
+ :dedent: 1
In the next step we create and apply the flow rule, which is to send packets
with destination IP equal to 192.168.1.1 to queue number 1. A detailed
explanation of ``generate_ipv4_flow()`` appears later in this document:
-.. code-block:: c
-
- flow = generate_ipv4_flow(port_id, selected_queue,
- SRC_IP, EMPTY_MASK,
- DEST_IP, FULL_MASK, &error);
+.. literalinclude:: ../../../examples/flow_filtering/main.c
+ :language: c
+ :start-after: Create flow for send packet with. 8<
+ :end-before: >8 End of create flow and the flow rule.
+ :dedent: 1
We are setting the RX port to promiscuous mode:
-.. code-block:: c
-
- ret = rte_eth_promiscuous_enable(port_id);
- if (ret != 0) {
- rte_exit(EXIT_FAILURE,
- ":: cannot enable promiscuous mode: err=%d, port=%u\n",
- ret, port_id);
- }
+.. literalinclude:: ../../../examples/flow_filtering/main.c
+ :language: c
+ :start-after: Setting the RX port to promiscuous mode. 8<
+ :end-before: >8 End of setting the RX port to promiscuous mode.
+ :dedent: 1
The last step is to start the port.
-.. code-block:: c
-
- ret = rte_eth_dev_start(port_id);
- if (ret < 0) {
- rte_exit(EXIT_FAILURE, "rte_eth_dev_start:err%d, port=%u\n",
- ret, port_id);
- }
+.. literalinclude:: ../../../examples/flow_filtering/main.c
+ :language: c
+ :start-after: Starting the port. 8<
+ :end-before: >8 End of starting the port.
+ :dedent: 1
The main_loop function
@@ -288,66 +163,18 @@ As we saw above the ``main()`` function calls an application function to handle
the main loop. For the flow filtering application the main_loop function
looks like the following:
-.. code-block:: c
-
- static void
- main_loop(void)
- {
- struct rte_mbuf *mbufs[32];
- struct rte_ether_hdr *eth_hdr;
- uint16_t nb_rx;
- uint16_t i;
- uint16_t j;
-
- while (!force_quit) {
- for (i = 0; i < nr_queues; i++) {
- nb_rx = rte_eth_rx_burst(port_id,
- i, mbufs, 32);
- if (nb_rx) {
- for (j = 0; j < nb_rx; j++) {
- struct rte_mbuf *m = mbufs[j];
-
- eth_hdr = rte_pktmbuf_mtod(m,
- struct rte_ether_hdr *);
- print_ether_addr("src=",
- ð_hdr->s_addr);
- print_ether_addr(" - dst=",
- ð_hdr->d_addr);
- printf(" - queue=0x%x",
- (unsigned int)i);
- printf("\n");
- rte_pktmbuf_free(m);
- }
- }
- }
- }
- /* closing and releasing resources */
- rte_flow_flush(port_id, &error);
- rte_eth_dev_stop(port_id);
- rte_eth_dev_close(port_id);
- }
+.. literalinclude:: ../../../examples/flow_filtering/main.c
+ :language: c
+ :start-after: Main_loop for flow filtering. 8<
+ :end-before: >8 End of reading the packets from all queues.
The main work of the application is reading the packets from all
queues and printing for each packet the destination queue:
-.. code-block:: c
-
- while (!force_quit) {
- for (i = 0; i < nr_queues; i++) {
- nb_rx = rte_eth_rx_burst(port_id, i, mbufs, 32);
- if (nb_rx) {
- for (j = 0; j < nb_rx; j++) {
- struct rte_mbuf *m = mbufs[j];
- eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
- print_ether_addr("src=", ð_hdr->s_addr);
- print_ether_addr(" - dst=", ð_hdr->d_addr);
- printf(" - queue=0x%x", (unsigned int)i);
- printf("\n");
- rte_pktmbuf_free(m);
- }
- }
- }
- }
+.. literalinclude:: ../../../examples/flow_filtering/main.c
+ :language: c
+ :start-after: Reading the packets from all queues. 8<
+ :end-before: >8 End of main_loop for flow filtering.
The forwarding loop can be interrupted and the application closed using
@@ -360,100 +187,34 @@ The generate_ipv4_flow function
The generate_ipv4_flow function is responsible for creating the flow rule.
This function is located in the ``flow_blocks.c`` file.
-.. code-block:: c
-
- static struct rte_flow *
- generate_ipv4_flow(uint16_t port_id, uint16_t rx_q,
- uint32_t src_ip, uint32_t src_mask,
- uint32_t dest_ip, uint32_t dest_mask,
- struct rte_flow_error *error)
- {
- struct rte_flow_attr attr;
- struct rte_flow_item pattern[MAX_PATTERN_NUM];
- struct rte_flow_action action[MAX_ACTION_NUM];
- struct rte_flow *flow = NULL;
- struct rte_flow_action_queue queue = { .index = rx_q };
- struct rte_flow_item_ipv4 ip_spec;
- struct rte_flow_item_ipv4 ip_mask;
-
- memset(pattern, 0, sizeof(pattern));
- memset(action, 0, sizeof(action));
-
- /*
- * set the rule attribute.
- * in this case only ingress packets will be checked.
- */
- memset(&attr, 0, sizeof(struct rte_flow_attr));
- attr.ingress = 1;
-
- /*
- * create the action sequence.
- * one action only, move packet to queue
- */
- action[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
- action[0].conf = &queue;
- action[1].type = RTE_FLOW_ACTION_TYPE_END;
-
- /*
- * set the first level of the pattern (ETH).
- * since in this example we just want to get the
- * ipv4 we set this level to allow all.
- */
- pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;
-
- /*
- * setting the second level of the pattern (IP).
- * in this example this is the level we care about
- * so we set it according to the parameters.
- */
- memset(&ip_spec, 0, sizeof(struct rte_flow_item_ipv4));
- memset(&ip_mask, 0, sizeof(struct rte_flow_item_ipv4));
- ip_spec.hdr.dst_addr = htonl(dest_ip);
- ip_mask.hdr.dst_addr = dest_mask;
- ip_spec.hdr.src_addr = htonl(src_ip);
- ip_mask.hdr.src_addr = src_mask;
- pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
- pattern[1].spec = &ip_spec;
- pattern[1].mask = &ip_mask;
-
- /* the final level must be always type end */
- pattern[2].type = RTE_FLOW_ITEM_TYPE_END;
-
- int res = rte_flow_validate(port_id, &attr, pattern, action, error);
- if(!res)
- flow = rte_flow_create(port_id, &attr, pattern, action, error);
-
- return flow;
- }
+.. literalinclude:: ../../../examples/flow_filtering/flow_blocks.c
+ :language: c
+ :start-after: Function responsible for creating the flow rule. 8<
+ :end-before: >8 End of function responsible for creating the flow rule.
The first part of the function declares the structures that will be used.
-.. code-block:: c
-
- struct rte_flow_attr attr;
- struct rte_flow_item pattern[MAX_PATTERN_NUM];
- struct rte_flow_action action[MAX_ACTION_NUM];
- struct rte_flow *flow;
- struct rte_flow_error error;
- struct rte_flow_action_queue queue = { .index = rx_q };
- struct rte_flow_item_ipv4 ip_spec;
- struct rte_flow_item_ipv4 ip_mask;
+.. literalinclude:: ../../../examples/flow_filtering/flow_blocks.c
+ :language: c
+ :start-after: Declaring structs being used. 8<
+ :end-before: >8 End of declaring structs being used.
+ :dedent: 1
The following part creates the flow attributes, in our case ingress.
-.. code-block:: c
-
- memset(&attr, 0, sizeof(struct rte_flow_attr));
- attr.ingress = 1;
+.. literalinclude:: ../../../examples/flow_filtering/flow_blocks.c
+ :language: c
+ :start-after: Set the rule attribute, only ingress packets will be checked. 8<
+ :end-before: >8 End of setting the rule attribute.
+ :dedent: 1
The third part defines the action to be taken when a packet matches
the rule. In this case the packet is sent to a queue.
-.. code-block:: c
-
- action[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
- action[0].conf = &queue;
- action[1].type = RTE_FLOW_ACTION_TYPE_END;
+.. literalinclude:: ../../../examples/flow_filtering/flow_blocks.c
+ :language: c
+ :start-after: Function responsible for creating the flow rule. 8<
+ :end-before: >8 End of setting the rule attribute.
The fourth part is responsible for creating the pattern and is built from a
number of steps. In each step we build one level of the pattern starting with
@@ -461,34 +222,32 @@ the lowest one.
Setting the first level of the pattern ETH:
-.. code-block:: c
-
- pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;
+.. literalinclude:: ../../../examples/flow_filtering/flow_blocks.c
+ :language: c
+ :start-after: IPv4 we set this level to allow all. 8<
+ :end-before: >8 End of setting the first level of the pattern.
+ :dedent: 1
Setting the second level of the pattern IP:
-.. code-block:: c
-
- memset(&ip_spec, 0, sizeof(struct rte_flow_item_ipv4));
- memset(&ip_mask, 0, sizeof(struct rte_flow_item_ipv4));
- ip_spec.hdr.dst_addr = htonl(dest_ip);
- ip_mask.hdr.dst_addr = dest_mask;
- ip_spec.hdr.src_addr = htonl(src_ip);
- ip_mask.hdr.src_addr = src_mask;
- pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
- pattern[1].spec = &ip_spec;
- pattern[1].mask = &ip_mask;
+.. literalinclude:: ../../../examples/flow_filtering/flow_blocks.c
+ :language: c
+ :start-after: Setting the second level of the pattern. 8<
+ :end-before: >8 End of setting the second level of the pattern.
+ :dedent: 1
Closing the pattern part.
-.. code-block:: c
-
- pattern[2].type = RTE_FLOW_ITEM_TYPE_END;
+.. literalinclude:: ../../../examples/flow_filtering/flow_blocks.c
+ :language: c
+ :start-after: The final level must be always type end. 8<
+ :end-before: >8 End of final level must be always type end.
+ :dedent: 1
The last part of the function is to validate the rule and create it.
-.. code-block:: c
-
- int res = rte_flow_validate(port_id, &attr, pattern, action, &error);
- if (!res)
- flow = rte_flow_create(port_id, &attr, pattern, action, &error);
+.. literalinclude:: ../../../examples/flow_filtering/flow_blocks.c
+ :language: c
+ :start-after: Validate the rule and create it. 8<
+ :end-before: >8 End of validation the rule and create it.
+ :dedent: 1
@@ -37,16 +37,10 @@ EAL Initialization
The first task is to initialize the Environment Abstraction Layer (EAL).
This is done in the main() function using the following code:
-.. code-block:: c
-
- int
-
- main(int argc, char **argv)
-
- {
- ret = rte_eal_init(argc, argv);
- if (ret < 0)
- rte_panic("Cannot init EAL\n");
+.. literalinclude:: ../../../examples/helloworld/main.c
+ :language: c
+ :start-after: Initialization of Environment Abstraction Layer (EAL). 8<
+ :end-before: >8 End of initialization of Environment Abstraction Layer
This call finishes the initialization process that was started before main() is called (in case of a Linux environment).
The argc and argv arguments are provided to the rte_eal_init() function.
@@ -59,36 +53,25 @@ Once the EAL is initialized, the application is ready to launch a function on an
In this example, lcore_hello() is called on every available lcore.
The following is the definition of the function:
-.. code-block:: c
-
- static int
- lcore_hello(__rte_unused void *arg)
- {
- unsigned lcore_id;
-
- lcore_id = rte_lcore_id();
- printf("hello from core %u\n", lcore_id);
- return 0;
- }
+.. literalinclude:: ../../../examples/helloworld/main.c
+ :language: c
+ :start-after: Launch a function on lcore. 8<
+ :end-before: >8 End of launching function on lcore.
The code that launches the function on each lcore is as follows:
-.. code-block:: c
-
- /* call lcore_hello() on every worker lcore */
-
- RTE_LCORE_FOREACH_WORKER(lcore_id) {
- rte_eal_remote_launch(lcore_hello, NULL, lcore_id);
- }
-
- /* call it on main lcore too */
-
- lcore_hello(NULL);
+.. literalinclude:: ../../../examples/helloworld/main.c
+ :language: c
+ :start-after: Launches the function on each lcore. 8<
+ :end-before: >8 End of launching the function on each lcore.
+ :dedent: 1
The following code is equivalent and simpler:
-.. code-block:: c
-
- rte_eal_mp_remote_launch(lcore_hello, NULL, CALL_MAIN);
+.. literalinclude:: ../../../examples/helloworld/main.c
+ :language: c
+ :start-after: Simpler equivalent. 8<
+ :end-before: >8 End of simpler equivalent.
+ :dedent: 2
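+With either form, the main lcore usually waits for the worker lcores to finish
+before the program exits. A minimal sketch of that step (the exact code in the
+example may differ) is:
+
+.. code-block:: c
+
+    /* Block until every worker lcore has returned from lcore_hello(). */
+    rte_eal_mp_wait_lcore();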
Refer to the *DPDK API Reference* for detailed information on the rte_eal_mp_remote_launch() function.
@@ -114,41 +114,32 @@ The first task is to initialize the Environment Abstraction Layer (EAL).
The ``argc`` and ``argv`` arguments are provided to the ``rte_eal_init()``
function. The value returned is the number of parsed arguments:
-.. code-block:: c
-
- /* init EAL */
- ret = rte_eal_init(argc, argv);
- if (ret < 0)
- rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n");
+.. literalinclude:: ../../../examples/ioat/ioatfwd.c
+ :language: c
+ :start-after: Init EAL. 8<
+ :end-before: >8 End of init EAL.
+ :dedent: 1
The ``main()`` function also allocates a mempool to hold the mbufs (Message Buffers)
used by the application:
-.. code-block:: c
-
- nb_mbufs = RTE_MAX(rte_eth_dev_count_avail() * (nb_rxd + nb_txd
- + MAX_PKT_BURST + rte_lcore_count() * MEMPOOL_CACHE_SIZE),
- MIN_POOL_SIZE);
-
- /* Create the mbuf pool */
- ioat_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", nb_mbufs,
- MEMPOOL_CACHE_SIZE, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
- rte_socket_id());
- if (ioat_pktmbuf_pool == NULL)
- rte_exit(EXIT_FAILURE, "Cannot init mbuf pool\n");
+.. literalinclude:: ../../../examples/ioat/ioatfwd.c
+ :language: c
+ :start-after: Allocates mempool to hold the mbufs. 8<
+ :end-before: >8 End of allocates mempool to hold the mbufs.
+ :dedent: 1
Mbufs are the packet buffer structure used by DPDK. They are explained in
detail in the "Mbuf Library" section of the *DPDK Programmer's Guide*.
The ``main()`` function also initializes the ports:
-.. code-block:: c
-
- /* Initialise each port */
- RTE_ETH_FOREACH_DEV(portid) {
- port_init(portid, ioat_pktmbuf_pool);
- }
+.. literalinclude:: ../../../examples/ioat/ioatfwd.c
+ :language: c
+ :start-after: Initialize each port. 8<
+ :end-before: >8 End of initializing each port.
+ :dedent: 1
Each port is configured using the ``port_init()`` function. The Ethernet
ports are configured with local settings using the ``rte_eth_dev_configure()``
@@ -156,21 +147,11 @@ function and the ``port_conf`` struct. The RSS is enabled so that
multiple Rx queues could be used for packet receiving and copying by
multiple CBDMA channels per port:
-.. code-block:: c
-
- /* configuring port to use RSS for multiple RX queues */
- static const struct rte_eth_conf port_conf = {
- .rxmode = {
- .mq_mode = ETH_MQ_RX_RSS,
- .max_rx_pkt_len = RTE_ETHER_MAX_LEN
- },
- .rx_adv_conf = {
- .rss_conf = {
- .rss_key = NULL,
- .rss_hf = ETH_RSS_PROTO_MASK,
- }
- }
- };
+.. literalinclude:: ../../../examples/ioat/ioatfwd.c
+ :language: c
+ :start-after: Configuring port to use RSS for multiple RX queues. 8<
+ :end-before: >8 End of configuring port to use RSS for multiple RX queues.
+ :dedent: 1
For this example the ports are set up with the number of Rx queues provided
with -q option and 1 Tx queue using the ``rte_eth_rx_queue_setup()``
@@ -178,109 +159,51 @@ and ``rte_eth_tx_queue_setup()`` functions.
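+A minimal sketch of such a queue setup is shown below; the identifiers
+``portid``, ``nb_queues``, ``nb_rxd``, ``nb_txd`` and ``ioat_pktmbuf_pool``
+are assumptions here and may differ from the actual code in ``ioatfwd.c``:
+
+.. code-block:: c
+
+    uint16_t i;
+    int ret;
+
+    /* One Rx queue per CBDMA channel requested with the -q option. */
+    for (i = 0; i < nb_queues; i++) {
+        ret = rte_eth_rx_queue_setup(portid, i, nb_rxd,
+                rte_eth_dev_socket_id(portid), NULL, ioat_pktmbuf_pool);
+        if (ret < 0)
+            rte_exit(EXIT_FAILURE, "Rx queue setup failed: err=%d\n", ret);
+    }
+
+    /* A single Tx queue is enough for this application. */
+    ret = rte_eth_tx_queue_setup(portid, 0, nb_txd,
+            rte_eth_dev_socket_id(portid), NULL);
+    if (ret < 0)
+        rte_exit(EXIT_FAILURE, "Tx queue setup failed: err=%d\n", ret);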
The Ethernet port is then started:
-.. code-block:: c
-
- ret = rte_eth_dev_start(portid);
- if (ret < 0)
- rte_exit(EXIT_FAILURE, "rte_eth_dev_start:err=%d, port=%u\n",
- ret, portid);
+.. literalinclude:: ../../../examples/ioat/ioatfwd.c
+ :language: c
+ :start-after: Start device. 8<
+ :end-before: >8 End of starting device.
+ :dedent: 1
Finally the Rx port is set in promiscuous mode:
-.. code-block:: c
-
- rte_eth_promiscuous_enable(portid);
+.. literalinclude:: ../../../examples/ioat/ioatfwd.c
+ :language: c
+ :start-after: RX port is set in promiscuous mode. 8<
+ :end-before: >8 End of RX port is set in promiscuous mode.
+ :dedent: 1
After that the application assigns the resources needed by each port.
-.. code-block:: c
-
- check_link_status(ioat_enabled_port_mask);
-
- if (!cfg.nb_ports) {
- rte_exit(EXIT_FAILURE,
- "All available ports are disabled. Please set portmask.\n");
- }
-
- /* Check if there is enough lcores for all ports. */
- cfg.nb_lcores = rte_lcore_count() - 1;
- if (cfg.nb_lcores < 1)
- rte_exit(EXIT_FAILURE,
- "There should be at least one worker lcore.\n");
-
- ret = 0;
-
- if (copy_mode == COPY_MODE_IOAT_NUM) {
- assign_rawdevs();
- } else /* copy_mode == COPY_MODE_SW_NUM */ {
- assign_rings();
- }
+.. literalinclude:: ../../../examples/ioat/ioatfwd.c
+ :language: c
+ :start-after: Assigning each port resources. 8<
+ :end-before: >8 End of assigning each port resources.
+ :dedent: 1
Depending on the mode set (whether the copy should be done by software or by hardware),
special structures are assigned to each port. If software copy was chosen, the
application has to assign ring structures for packet exchange between the lcores
assigned to the ports.
-.. code-block:: c
-
- static void
- assign_rings(void)
- {
- uint32_t i;
-
- for (i = 0; i < cfg.nb_ports; i++) {
- char ring_name[20];
-
- snprintf(ring_name, 20, "rx_to_tx_ring_%u", i);
- /* Create ring for inter core communication */
- cfg.ports[i].rx_to_tx_ring = rte_ring_create(
- ring_name, ring_size,
- rte_socket_id(), RING_F_SP_ENQ);
-
- if (cfg.ports[i].rx_to_tx_ring == NULL)
- rte_exit(EXIT_FAILURE, "%s\n",
- rte_strerror(rte_errno));
- }
- }
+.. literalinclude:: ../../../examples/ioat/ioatfwd.c
+ :language: c
+ :start-after: Assign ring structures for packet exchanging. 8<
+ :end-before: >8 End of assigning ring structures for packet exchanging.
+ :dedent: 0
When using hardware copy each Rx queue of the port is assigned an
IOAT device (``assign_rawdevs()``) using IOAT Rawdev Driver API
functions:
-.. code-block:: c
-
- static void
- assign_rawdevs(void)
- {
- uint16_t nb_rawdev = 0, rdev_id = 0;
- uint32_t i, j;
-
- for (i = 0; i < cfg.nb_ports; i++) {
- for (j = 0; j < cfg.ports[i].nb_queues; j++) {
- struct rte_rawdev_info rdev_info = { 0 };
-
- do {
- if (rdev_id == rte_rawdev_count())
- goto end;
- rte_rawdev_info_get(rdev_id++, &rdev_info, 0);
- } while (strcmp(rdev_info.driver_name,
- IOAT_PMD_RAWDEV_NAME_STR) != 0);
-
- cfg.ports[i].ioat_ids[j] = rdev_id - 1;
- configure_rawdev_queue(cfg.ports[i].ioat_ids[j]);
- ++nb_rawdev;
- }
- }
- end:
- if (nb_rawdev < cfg.nb_ports * cfg.ports[0].nb_queues)
- rte_exit(EXIT_FAILURE,
- "Not enough IOAT rawdevs (%u) for all queues (%u).\n",
- nb_rawdev, cfg.nb_ports * cfg.ports[0].nb_queues);
- RTE_LOG(INFO, IOAT, "Number of used rawdevs: %u.\n", nb_rawdev);
- }
+.. literalinclude:: ../../../examples/ioat/ioatfwd.c
+ :language: c
+ :start-after: Using IOAT rawdev API functions. 8<
+ :end-before: >8 End of using IOAT rawdev API functions.
+ :dedent: 0
The initialization of the hardware device is done by the ``rte_rawdev_configure()``
@@ -288,23 +211,11 @@ function using ``rte_rawdev_info`` struct. After configuration the device is
started using the ``rte_rawdev_start()`` function. Each of the above operations
is done in ``configure_rawdev_queue()``.
-.. code-block:: c
-
- static void
- configure_rawdev_queue(uint32_t dev_id)
- {
- struct rte_ioat_rawdev_config dev_config = { .ring_size = ring_size };
- struct rte_rawdev_info info = { .dev_private = &dev_config };
-
- if (rte_rawdev_configure(dev_id, &info, sizeof(dev_config)) != 0) {
- rte_exit(EXIT_FAILURE,
- "Error with rte_rawdev_configure()\n");
- }
- if (rte_rawdev_start(dev_id) != 0) {
- rte_exit(EXIT_FAILURE,
- "Error with rte_rawdev_start()\n");
- }
- }
+.. literalinclude:: ../../../examples/ioat/ioatfwd.c
+ :language: c
+ :start-after: Configuration of device. 8<
+ :end-before: >8 End of configuration of device.
+ :dedent: 0
If initialization is successful, memory for hardware device
statistics is allocated.
@@ -322,42 +233,22 @@ The Lcores Launching Functions
As described above, the ``main()`` function invokes the ``start_forwarding_cores()``
function in order to start processing for each lcore:
-.. code-block:: c
-
- static void start_forwarding_cores(void)
- {
- uint32_t lcore_id = rte_lcore_id();
-
- RTE_LOG(INFO, IOAT, "Entering %s on lcore %u\n",
- __func__, rte_lcore_id());
-
- if (cfg.nb_lcores == 1) {
- lcore_id = rte_get_next_lcore(lcore_id, true, true);
- rte_eal_remote_launch((lcore_function_t *)rxtx_main_loop,
- NULL, lcore_id);
- } else if (cfg.nb_lcores > 1) {
- lcore_id = rte_get_next_lcore(lcore_id, true, true);
- rte_eal_remote_launch((lcore_function_t *)rx_main_loop,
- NULL, lcore_id);
-
- lcore_id = rte_get_next_lcore(lcore_id, true, true);
- rte_eal_remote_launch((lcore_function_t *)tx_main_loop, NULL,
- lcore_id);
- }
- }
+.. literalinclude:: ../../../examples/ioat/ioatfwd.c
+ :language: c
+ :start-after: Start processing for each lcore. 8<
+ :end-before: >8 End of starting to processfor each lcore.
+ :dedent: 0
The function launches Rx/Tx processing functions on configured lcores
using ``rte_eal_remote_launch()``. The configured ports, their number
and number of assigned lcores are stored in user-defined
``rxtx_transmission_config`` struct:
-.. code-block:: c
-
- struct rxtx_transmission_config {
- struct rxtx_port_config ports[RTE_MAX_ETHPORTS];
- uint16_t nb_ports;
- uint16_t nb_lcores;
- };
+.. literalinclude:: ../../../examples/ioat/ioatfwd.c
+ :language: c
+ :start-after: Configuring ports and number of assigned lcores in struct. 8<
+ :end-before: >8 End of configuration of ports and number of assigned lcores.
+ :dedent: 0
The structure is initialized in the ``main()`` function with the values
corresponding to the ports and lcores configuration provided by the user.
@@ -371,63 +262,11 @@ mode the user chose, it will enqueue packets to IOAT rawdev channels and
then invoke the copy process (hardware copy), or perform a software copy of each
packet using the ``pktmbuf_sw_copy()`` function and enqueue them to an rte_ring:
-.. code-block:: c
-
- /* Receive packets on one port and enqueue to IOAT rawdev or rte_ring. */
- static void
- ioat_rx_port(struct rxtx_port_config *rx_config)
- {
- uint32_t nb_rx, nb_enq, i, j;
- struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
- for (i = 0; i < rx_config->nb_queues; i++) {
-
- nb_rx = rte_eth_rx_burst(rx_config->rxtx_port, i,
- pkts_burst, MAX_PKT_BURST);
-
- if (nb_rx == 0)
- continue;
-
- port_statistics.rx[rx_config->rxtx_port] += nb_rx;
-
- if (copy_mode == COPY_MODE_IOAT_NUM) {
- /* Perform packet hardware copy */
- nb_enq = ioat_enqueue_packets(pkts_burst,
- nb_rx, rx_config->ioat_ids[i]);
- if (nb_enq > 0)
- rte_ioat_perform_ops(rx_config->ioat_ids[i]);
- } else {
- /* Perform packet software copy, free source packets */
- int ret;
- struct rte_mbuf *pkts_burst_copy[MAX_PKT_BURST];
-
- ret = rte_mempool_get_bulk(ioat_pktmbuf_pool,
- (void *)pkts_burst_copy, nb_rx);
-
- if (unlikely(ret < 0))
- rte_exit(EXIT_FAILURE,
- "Unable to allocate memory.\n");
-
- for (j = 0; j < nb_rx; j++)
- pktmbuf_sw_copy(pkts_burst[j],
- pkts_burst_copy[j]);
-
- rte_mempool_put_bulk(ioat_pktmbuf_pool,
- (void *)pkts_burst, nb_rx);
-
- nb_enq = rte_ring_enqueue_burst(
- rx_config->rx_to_tx_ring,
- (void *)pkts_burst_copy, nb_rx, NULL);
-
- /* Free any not enqueued packets. */
- rte_mempool_put_bulk(ioat_pktmbuf_pool,
- (void *)&pkts_burst_copy[nb_enq],
- nb_rx - nb_enq);
- }
-
- port_statistics.copy_dropped[rx_config->rxtx_port] +=
- (nb_rx - nb_enq);
- }
- }
+.. literalinclude:: ../../../examples/ioat/ioatfwd.c
+ :language: c
+ :start-after: Receive packets on one port and enqueue to IOAT rawdev or rte_ring. 8<
+ :end-before: >8 End of receive packets on one port and enqueue to IOAT rawdev or rte_ring.
+ :dedent: 0
The packets are received in burst mode using the ``rte_eth_rx_burst()``
function. When using hardware copy mode the packets are enqueued in
@@ -443,50 +282,11 @@ be copied in a single operation. This method can be used because the mbufs
are direct mbufs allocated by the apps. If another app uses external buffers,
or indirect mbufs, then multiple copy operations must be used.
-.. code-block:: c
-
- static uint32_t
- ioat_enqueue_packets(struct rte_mbuf **pkts,
- uint32_t nb_rx, uint16_t dev_id)
- {
- int ret;
- uint32_t i;
- struct rte_mbuf *pkts_copy[MAX_PKT_BURST];
-
- const uint64_t addr_offset = RTE_PTR_DIFF(pkts[0]->buf_addr,
- &pkts[0]->rearm_data);
-
- ret = rte_mempool_get_bulk(ioat_pktmbuf_pool,
- (void *)pkts_copy, nb_rx);
-
- if (unlikely(ret < 0))
- rte_exit(EXIT_FAILURE, "Unable to allocate memory.\n");
-
- for (i = 0; i < nb_rx; i++) {
- /* Perform data copy */
- ret = rte_ioat_enqueue_copy(dev_id,
- pkts[i]->buf_iova
- - addr_offset,
- pkts_copy[i]->buf_iova
- - addr_offset,
- rte_pktmbuf_data_len(pkts[i])
- + addr_offset,
- (uintptr_t)pkts[i],
- (uintptr_t)pkts_copy[i],
- 0 /* nofence */);
-
- if (ret != 1)
- break;
- }
-
- ret = i;
- /* Free any not enqueued packets. */
- rte_mempool_put_bulk(ioat_pktmbuf_pool, (void *)&pkts[i], nb_rx - i);
- rte_mempool_put_bulk(ioat_pktmbuf_pool, (void *)&pkts_copy[i],
- nb_rx - i);
-
- return ret;
- }
+.. literalinclude:: ../../../examples/ioat/ioatfwd.c
+ :language: c
+ :start-after: Receive packets on one port and enqueue to IOAT rawdev or rte_ring. 8<
+ :end-before: >8 End of receive packets on one port and enqueue to IOAT rawdev or rte_ring.
+ :dedent: 0
All completed copies are processed by the ``ioat_tx_port()`` function. When using
@@ -497,56 +297,11 @@ packet MAC address is changed if it was enabled. After that copies are sent
in burst mode using ``rte_eth_tx_burst()``.
-.. code-block:: c
-
- /* Transmit packets from IOAT rawdev/rte_ring for one port. */
- static void
- ioat_tx_port(struct rxtx_port_config *tx_config)
- {
- uint32_t i, j, nb_dq = 0;
- struct rte_mbuf *mbufs_src[MAX_PKT_BURST];
- struct rte_mbuf *mbufs_dst[MAX_PKT_BURST];
-
- for (i = 0; i < tx_config->nb_queues; i++) {
- if (copy_mode == COPY_MODE_IOAT_NUM) {
- /* Deque the mbufs from IOAT device. */
- nb_dq = rte_ioat_completed_ops(
- tx_config->ioat_ids[i], MAX_PKT_BURST,
- (void *)mbufs_src, (void *)mbufs_dst);
- } else {
- /* Deque the mbufs from rx_to_tx_ring. */
- nb_dq = rte_ring_dequeue_burst(
- tx_config->rx_to_tx_ring, (void *)mbufs_dst,
- MAX_PKT_BURST, NULL);
- }
-
- if (nb_dq == 0)
- return;
-
- if (copy_mode == COPY_MODE_IOAT_NUM)
- rte_mempool_put_bulk(ioat_pktmbuf_pool,
- (void *)mbufs_src, nb_dq);
-
- /* Update macs if enabled */
- if (mac_updating) {
- for (j = 0; j < nb_dq; j++)
- update_mac_addrs(mbufs_dst[j],
- tx_config->rxtx_port);
- }
-
- const uint16_t nb_tx = rte_eth_tx_burst(
- tx_config->rxtx_port, 0,
- (void *)mbufs_dst, nb_dq);
-
- port_statistics.tx[tx_config->rxtx_port] += nb_tx;
-
- /* Free any unsent packets. */
- if (unlikely(nb_tx < nb_dq))
- rte_mempool_put_bulk(ioat_pktmbuf_pool,
- (void *)&mbufs_dst[nb_tx],
- nb_dq - nb_tx);
- }
- }
+.. literalinclude:: ../../../examples/ioat/ioatfwd.c
+ :language: c
+ :start-after: Transmit packets from IOAT rawdev/rte_ring for one port. 8<
+ :end-before: >8 End of transmitting packets from IOAT.
+ :dedent: 0
The Packet Copying Functions
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -557,21 +312,11 @@ metadata from source packet to new mbuf, and then copying a data
chunk of source packet. Both memory copies are done using
``rte_memcpy()``:
-.. code-block:: c
-
- static inline void
- pktmbuf_sw_copy(struct rte_mbuf *src, struct rte_mbuf *dst)
- {
- /* Copy packet metadata */
- rte_memcpy(&dst->rearm_data,
- &src->rearm_data,
- offsetof(struct rte_mbuf, cacheline1)
- - offsetof(struct rte_mbuf, rearm_data));
-
- /* Copy packet data */
- rte_memcpy(rte_pktmbuf_mtod(dst, char *),
- rte_pktmbuf_mtod(src, char *), src->data_len);
- }
+.. literalinclude:: ../../../examples/ioat/ioatfwd.c
+ :language: c
+ :start-after: Perform packet copy there is a user-defined function. 8<
+ :end-before: >8 End of perform packet copy there is a user-defined function.
+ :dedent: 0
The metadata in this example is copied from ``rearm_data`` member of
``rte_mbuf`` struct up to ``cacheline1``.
@@ -101,33 +101,17 @@ l3fwd_ipv4_route_array and/or l3fwd_ipv6_route_array table.
The default l3fwd_ipv4_route_array table is:
-.. code-block:: c
-
- struct l3fwd_ipv4_route l3fwd_ipv4_route_array[] = {
- {RTE_IPV4(100, 10, 0, 0), 16, 0},
- {RTE_IPV4(100, 20, 0, 0), 16, 1},
- {RTE_IPV4(100, 30, 0, 0), 16, 2},
- {RTE_IPV4(100, 40, 0, 0), 16, 3},
- {RTE_IPV4(100, 50, 0, 0), 16, 4},
- {RTE_IPV4(100, 60, 0, 0), 16, 5},
- {RTE_IPV4(100, 70, 0, 0), 16, 6},
- {RTE_IPV4(100, 80, 0, 0), 16, 7},
- };
+.. literalinclude:: ../../../examples/ip_fragmentation/main.c
+ :language: c
+ :start-after: Default l3fwd_ipv4_route_array table. 8<
+ :end-before: >8 End of default l3fwd_ipv4_route_array table
The default l3fwd_ipv6_route_array table is:
-.. code-block:: c
-
- struct l3fwd_ipv6_route l3fwd_ipv6_route_array[] = {
- {{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, 48, 0},
- {{2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, 48, 1},
- {{3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, 48, 2},
- {{4, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, 48, 3},
- {{5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, 48, 4},
- {{6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, 48, 5},
- {{7, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, 48, 6},
- {{8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, 48, 7},
- };
+.. literalinclude:: ../../../examples/ip_fragmentation/main.c
+ :language: c
+ :start-after: Default l3fwd_ipv6_route_array table. 8<
+ :end-before: >8 End of default l3fwd_ipv6_route_array table.
For example, for the input IPv4 packet with destination address: 100.10.1.1 and packet length 9198 bytes,
seven IPv4 packets will be sent out from port #0 to the destination address 100.10.1.1:
@@ -104,33 +104,17 @@ fragment extension header are present in the packet.
The default l3fwd_ipv4_route_array table is:
-.. code-block:: c
-
- struct l3fwd_ipv4_route l3fwd_ipv4_route_array[] = {
- {RTE_IPV4(100, 10, 0, 0), 16, 0},
- {RTE_IPV4(100, 20, 0, 0), 16, 1},
- {RTE_IPV4(100, 30, 0, 0), 16, 2},
- {RTE_IPV4(100, 40, 0, 0), 16, 3},
- {RTE_IPV4(100, 50, 0, 0), 16, 4},
- {RTE_IPV4(100, 60, 0, 0), 16, 5},
- {RTE_IPV4(100, 70, 0, 0), 16, 6},
- {RTE_IPV4(100, 80, 0, 0), 16, 7},
- };
+.. literalinclude:: ../../../examples/ip_reassembly/main.c
+ :language: c
+ :start-after: Default l3fwd_ipv4_route_array table. 8<
+ :end-before: >8 End of default l3fwd_ipv4_route_array table.
The default l3fwd_ipv6_route_array table is:
-.. code-block:: c
-
- struct l3fwd_ipv6_route l3fwd_ipv6_route_array[] = {
- {{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, 48, 0},
- {{2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, 48, 1},
- {{3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, 48, 2},
- {{4, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, 48, 3},
- {{5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, 48, 4},
- {{6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, 48, 5},
- {{7, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, 48, 6},
- {{8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, 48, 7},
- };
+.. literalinclude:: ../../../examples/ip_reassembly/main.c
+ :language: c
+ :start-after: Default l3fwd_ipv6_route_array table. 8<
+ :end-before: >8 End of default l3fwd_ipv6_route_array table.
For example, for the fragmented input IPv4 packet with destination address: 100.10.1.1,
a reassembled IPv4 packet will be sent out from port #0 to the destination address 100.10.1.1
@@ -153,15 +137,11 @@ To avoid lock contention, each RX queue has its own Fragment Table,
i.e. the application can't handle the situation when different fragments of the same packet arrive through different RX queues.
Each table entry can hold information about a packet consisting of up to RTE_LIBRTE_IP_FRAG_MAX_FRAGS fragments.
-.. code-block:: c
-
- frag_cycles = (rte_get_tsc_hz() + MS_PER_S - 1) / MS_PER_S * max_flow_ttl;
-
- if ((qconf->frag_tbl[queue] = rte_ip_frag_tbl_create(max_flow_num, IPV4_FRAG_TBL_BUCKET_ENTRIES, max_flow_num, frag_cycles, socket)) == NULL)
- {
- RTE_LOG(ERR, IP_RSMBL, "ip_frag_tbl_create(%u) on " "lcore: %u for queue: %u failed\n", max_flow_num, lcore, queue);
- return -1;
- }
+.. literalinclude:: ../../../examples/ip_reassembly/main.c
+ :language: c
+ :start-after: Each table entry holds information about packet fragmentation. 8<
+ :end-before: >8 End of holding packet fragmentation.
+ :dedent: 1
Mempools Initialization
~~~~~~~~~~~~~~~~~~~~~~~
@@ -172,22 +152,11 @@ can be stored inside Fragment Table waiting for remaining fragments.
To keep the mempool size under reasonable limits and to avoid a situation where one RX queue can starve other queues,
each RX queue uses its own mempool.
-.. code-block:: c
-
- nb_mbuf = RTE_MAX(max_flow_num, 2UL * MAX_PKT_BURST) * RTE_LIBRTE_IP_FRAG_MAX_FRAGS;
- nb_mbuf *= (port_conf.rxmode.max_rx_pkt_len + BUF_SIZE - 1) / BUF_SIZE;
- nb_mbuf *= 2; /* ipv4 and ipv6 */
- nb_mbuf += RTE_TEST_RX_DESC_DEFAULT + RTE_TEST_TX_DESC_DEFAULT;
- nb_mbuf = RTE_MAX(nb_mbuf, (uint32_t)NB_MBUF);
-
- snprintf(buf, sizeof(buf), "mbuf_pool_%u_%u", lcore, queue);
-
- if ((rxq->pool = rte_mempool_create(buf, nb_mbuf, MBUF_SIZE, 0, sizeof(struct rte_pktmbuf_pool_private), rte_pktmbuf_pool_init, NULL,
- rte_pktmbuf_init, NULL, socket, MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) == NULL) {
-
- RTE_LOG(ERR, IP_RSMBL, "mempool_create(%s) failed", buf);
- return -1;
- }
+.. literalinclude:: ../../../examples/ip_reassembly/main.c
+ :language: c
+ :start-after: mbufs stored int the gragment table. 8<
+ :end-before: >8 End of mbufs stored int the fragmentation table.
+ :dedent: 1
Packet Reassembly and Forwarding
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -97,14 +97,11 @@ The IPv4 Multicast sample application uses three memory pools.
Two of the pools are for indirect buffers used for packet duplication purposes.
Memory pools for indirect buffers are initialized differently from the memory pool for direct buffers:
-.. code-block:: c
-
- packet_pool = rte_pktmbuf_pool_create("packet_pool", NB_PKT_MBUF, 32,
- 0, PKT_MBUF_DATA_SIZE, rte_socket_id());
- header_pool = rte_pktmbuf_pool_create("header_pool", NB_HDR_MBUF, 32,
- 0, HDR_MBUF_DATA_SIZE, rte_socket_id());
- clone_pool = rte_pktmbuf_pool_create("clone_pool", NB_CLONE_MBUF, 32,
- 0, 0, rte_socket_id());
+.. literalinclude:: ../../../examples/ipv4_multicast/main.c
+ :language: c
+ :start-after: Create the mbuf pools. 8<
+ :end-before: >8 End of create mbuf pools.
+ :dedent: 1
This is because indirect buffers are not supposed to hold any packet data and
can therefore be initialized with a lower amount of reserved memory for each buffer.
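
As a rough illustration of the difference (not the example's exact code; the pool names and sizes below are placeholders), a pool intended for indirect mbufs can be created with a data room size of zero, while a pool for direct buffers reserves a full packet-sized data room:

.. code-block:: c

    #include <rte_mbuf.h>
    #include <rte_lcore.h>

    /* Placeholder sizes for illustration only. */
    #define DEMO_NB_MBUF   8192
    #define DEMO_DATA_SIZE RTE_MBUF_DEFAULT_BUF_SIZE

    static int
    demo_create_pools(void)
    {
        /* Direct buffers: full data room to hold the packet payload. */
        struct rte_mempool *direct_pool = rte_pktmbuf_pool_create(
                "demo_direct_pool", DEMO_NB_MBUF, 32, 0,
                DEMO_DATA_SIZE, rte_socket_id());

        /* Indirect (clone) buffers: zero data room, as they only
         * reference payload owned by a direct mbuf. */
        struct rte_mempool *indirect_pool = rte_pktmbuf_pool_create(
                "demo_clone_pool", DEMO_NB_MBUF, 32, 0,
                0, rte_socket_id());

        return (direct_pool == NULL || indirect_pool == NULL) ? -1 : 0;
    }
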
@@ -114,27 +111,10 @@ Hash Initialization
The hash object is created and loaded with the pre-configured entries read from a global array:
-.. code-block:: c
-
- static int
-
- init_mcast_hash(void)
- {
- uint32_t i;
- mcast_hash_params.socket_id = rte_socket_id();
-
- mcast_hash = rte_fbk_hash_create(&mcast_hash_params);
- if (mcast_hash == NULL){
- return -1;
- }
-
- for (i = 0; i < N_MCAST_GROUPS; i ++){
- if (rte_fbk_hash_add_key(mcast_hash, mcast_group_table[i].ip, mcast_group_table[i].port_mask) < 0) {
- return -1;
- }
- }
- return 0;
- }
+.. literalinclude:: ../../../examples/ipv4_multicast/main.c
+ :language: c
+ :start-after: Hash object is created and loaded. 8<
+ :end-before: >8 End of hash object is created and loaded.
Forwarding
~~~~~~~~~~
@@ -142,106 +122,62 @@ Forwarding
All forwarding is done inside the mcast_forward() function.
Firstly, the Ethernet* header is removed from the packet and the IPv4 address is extracted from the IPv4 header:
-.. code-block:: c
-
- /* Remove the Ethernet header from the input packet */
-
- iphdr = (struct rte_ipv4_hdr *)rte_pktmbuf_adj(m, sizeof(struct rte_ether_hdr));
- RTE_ASSERT(iphdr != NULL);
- dest_addr = rte_be_to_cpu_32(iphdr->dst_addr);
+.. literalinclude:: ../../../examples/ipv4_multicast/main.c
+ :language: c
+ :start-after: Remove the Ethernet header from the input packet. 8<
+ :end-before: >8 End of removing the Ethernet header from the input packet.
+ :dedent: 1
Then, the packet is checked to see if it has a multicast destination address and
if the routing table has any ports assigned to the destination address:
-.. code-block:: c
-
- if (!RTE_IS_IPV4_MCAST(dest_addr) ||
- (hash = rte_fbk_hash_lookup(mcast_hash, dest_addr)) <= 0 ||
- (port_mask = hash & enabled_port_mask) == 0) {
- rte_pktmbuf_free(m);
- return;
- }
+.. literalinclude:: ../../../examples/ipv4_multicast/main.c
+ :language: c
+ :start-after: Check valid multicast address. 8<
+ :end-before: >8 End of valid multicast address check.
+ :dedent: 1
Then, the number of ports in the destination portmask is calculated with the help of the bitcnt() function:
-.. code-block:: c
-
- /* Get number of bits set. */
-
- static inline uint32_t bitcnt(uint32_t v)
- {
- uint32_t n;
-
- for (n = 0; v != 0; v &= v - 1, n++)
- ;
- return n;
- }
+.. literalinclude:: ../../../examples/ipv4_multicast/main.c
+ :language: c
+ :start-after: Get number of bits set. 8<
+ :end-before: >8 End of getting number of bits set.
This is done to determine which forwarding algorithm to use.
This is explained in more detail in the next section.
Thereafter, a destination Ethernet address is constructed:
-.. code-block:: c
-
- /* construct destination Ethernet address */
-
- dst_eth_addr = ETHER_ADDR_FOR_IPV4_MCAST(dest_addr);
+.. literalinclude:: ../../../examples/ipv4_multicast/main.c
+ :language: c
+ :start-after: Construct destination ethernet address. 8<
+ :end-before: >8 End of constructing destination ethernet address.
+ :dedent: 1
Since Ethernet addresses are also part of the multicast process, each outgoing packet carries the same destination Ethernet address.
The destination Ethernet address is constructed from the lower 23 bits of the multicast group OR-ed
with the Ethernet address 01:00:5e:00:00:00, as per RFC 1112:
-.. code-block:: c
-
- #define ETHER_ADDR_FOR_IPV4_MCAST(x) \
- (rte_cpu_to_be_64(0x01005e000000ULL | ((x) & 0x7fffff)) >> 16)
+.. literalinclude:: ../../../examples/ipv4_multicast/main.c
+ :language: c
+ :start-after: Construct Ethernet multicast address from IPv4 multicast Address. 8<
+ :end-before: >8 End of Construction of multicast address from IPv4 multicast address.
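
As a quick worked illustration of this mapping (the helper below is hypothetical and only mirrors the arithmetic byte by byte), the group address 224.1.2.3 keeps only its lower 23 bits, which are OR-ed into 01:00:5e:00:00:00, yielding the MAC address 01:00:5e:01:02:03:

.. code-block:: c

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical helper: derive the multicast MAC bytes from an IPv4
     * group address in host byte order, as described by RFC 1112. */
    static void
    demo_ipv4_mcast_to_mac(uint32_t group, uint8_t mac[6])
    {
        mac[0] = 0x01;
        mac[1] = 0x00;
        mac[2] = 0x5e;
        mac[3] = (group >> 16) & 0x7f; /* keep only the lower 23 bits */
        mac[4] = (group >> 8) & 0xff;
        mac[5] = group & 0xff;
    }

    int
    main(void)
    {
        uint8_t mac[6];
        uint32_t group = (224u << 24) | (1u << 16) | (2u << 8) | 3u; /* 224.1.2.3 */

        demo_ipv4_mcast_to_mac(group, mac);
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        return 0; /* prints 01:00:5e:01:02:03 */
    }
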
Then, packets are dispatched to the destination ports according to the portmask associated with a multicast group:
-.. code-block:: c
-
- for (port = 0; use_clone != port_mask; port_mask >>= 1, port++) {
- /* Prepare output packet and send it out. */
-
- if ((port_mask & 1) != 0) {
- if (likely ((mc = mcast_out_pkt(m, use_clone)) != NULL))
- mcast_send_pkt(mc, &dst_eth_addr.as_addr, qconf, port);
- else if (use_clone == 0)
- rte_pktmbuf_free(m);
- }
- }
+.. literalinclude:: ../../../examples/ipv4_multicast/main.c
+ :language: c
+ :start-after: Packets dispatched to destination ports. 8<
+ :end-before: >8 End of packets dispatched to destination ports.
+ :dedent: 1
The actual packet transmission is done in the mcast_send_pkt() function:
-.. code-block:: c
-
- static inline void mcast_send_pkt(struct rte_mbuf *pkt, struct rte_ether_addr *dest_addr, struct lcore_queue_conf *qconf, uint16_t port)
- {
- struct rte_ether_hdr *ethdr;
- uint16_t len;
-
- /* Construct Ethernet header. */
-
- ethdr = (struct rte_ether_hdr *)rte_pktmbuf_prepend(pkt, (uint16_t) sizeof(*ethdr));
-
- RTE_ASSERT(ethdr != NULL);
-
- rte_ether_addr_copy(dest_addr, ðdr->d_addr);
- rte_ether_addr_copy(&ports_eth_addr[port], ðdr->s_addr);
- ethdr->ether_type = rte_be_to_cpu_16(RTE_ETHER_TYPE_IPV4);
-
- /* Put new packet into the output queue */
-
- len = qconf->tx_mbufs[port].len;
- qconf->tx_mbufs[port].m_table[len] = pkt;
- qconf->tx_mbufs[port].len = ++len;
-
- /* Transmit packets */
-
- if (unlikely(MAX_PKT_BURST == len))
- send_burst(qconf, port);
- }
+.. literalinclude:: ../../../examples/ipv4_multicast/main.c
+ :language: c
+ :start-after: Write new Ethernet header to outgoing packets. 8<
+ :end-before: >8 End of writing new Ethernet headers.
Buffer Cloning
~~~~~~~~~~~~~~
@@ -281,45 +217,15 @@ As the number of outgoing ports (and/or input segments) grows, the second approa
Depending on the number of segments or the number of ports in the outgoing portmask,
either the first (with cloning) or the second (without cloning) approach is taken:
-.. code-block:: c
-
- use_clone = (port_num <= MCAST_CLONE_PORTS && m->pkt.nb_segs <= MCAST_CLONE_SEGS);
+.. literalinclude:: ../../../examples/ipv4_multicast/main.c
+ :language: c
+ :start-after: Should we use rte_pktmbuf_clone() or not. 8<
+ :end-before: >8 End of using rte_pktmbuf_clone().
+ :dedent: 1
It is the mcast_out_pkt() function that performs the packet duplication (either with or without actually cloning the buffers):
-.. code-block:: c
-
- static inline struct rte_mbuf *mcast_out_pkt(struct rte_mbuf *pkt, int use_clone)
- {
- struct rte_mbuf *hdr;
-
- /* Create new mbuf for the header. */
-
- if (unlikely ((hdr = rte_pktmbuf_alloc(header_pool)) == NULL))
- return NULL;
-
- /* If requested, then make a new clone packet. */
-
- if (use_clone != 0 && unlikely ((pkt = rte_pktmbuf_clone(pkt, clone_pool)) == NULL)) {
- rte_pktmbuf_free(hdr);
- return NULL;
- }
-
- /* prepend new header */
-
- hdr->pkt.next = pkt;
-
- /* update header's fields */
-
- hdr->pkt.pkt_len = (uint16_t)(hdr->pkt.data_len + pkt->pkt.pkt_len);
- hdr->pkt.nb_segs = pkt->pkt.nb_segs + 1;
-
- /* copy metadata from source packet */
-
- hdr->pkt.in_port = pkt->pkt.in_port;
- hdr->pkt.vlan_macip = pkt->pkt.vlan_macip;
- hdr->pkt.hash = pkt->pkt.hash;
- rte_mbuf_sanity_check(hdr, RTE_MBUF_PKT, 1);
-
- return hdr;
- }
+.. literalinclude:: ../../../examples/ipv4_multicast/main.c
+ :language: c
+ :start-after: mcast_out_pkt 8<
+ :end-before: >8 End of mcast_out_kt.
@@ -99,39 +99,31 @@ The keepalive functionality is initialized with a struct
rte_keepalive and the callback function to invoke in the
case of a timeout.
-.. code-block:: c
-
- rte_global_keepalive_info = rte_keepalive_create(&dead_core, NULL);
- if (rte_global_keepalive_info == NULL)
- rte_exit(EXIT_FAILURE, "keepalive_create() failed");
+.. literalinclude:: ../../../examples/l2fwd-keepalive/main.c
+ :language: c
+ :start-after: Initialize keepalive functionality. 8<
+ :end-before: >8 End of initializing keepalive functionality.
+ :dedent: 2
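
For reference, a minimal sketch of what such a timeout callback can look like (the name and body below are illustrative, not the example's actual ``dead_core()`` handler); a pointer to it, together with an opaque data argument, is what ``rte_keepalive_create()`` receives:

.. code-block:: c

    #include <stdio.h>
    #include <rte_common.h>
    #include <rte_keepalive.h>

    /* Illustrative callback: the keepalive monitor invokes it when a
     * registered lcore stops marking itself alive. */
    static void
    demo_dead_core_cb(void *data __rte_unused, const int id_core)
    {
        printf("keepalive: lcore %d missed its heartbeat\n", id_core);
    }

    /* Usage sketch:
     *   rte_global_keepalive_info =
     *           rte_keepalive_create(&demo_dead_core_cb, NULL);
     */
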
The function that issues the pings, keepalive_dispatch_pings(),
is configured to run every check_period milliseconds.
-.. code-block:: c
-
- if (rte_timer_reset(&hb_timer,
- (check_period * rte_get_timer_hz()) / 1000,
- PERIODICAL,
- rte_lcore_id(),
- &rte_keepalive_dispatch_pings,
- rte_global_keepalive_info
- ) != 0 )
- rte_exit(EXIT_FAILURE, "Keepalive setup failure.\n");
+.. literalinclude:: ../../../examples/l2fwd-keepalive/main.c
+ :language: c
+ :start-after: Issues the pings keepalive_dispatch_pings(). 8<
+ :end-before: >8 End of issuing the pings keepalive_dispatch_pings().
+ :dedent: 2
The rest of the initialization and run-time path follows
the same paths as the L2 forwarding application. The only
addition to the main processing loop is the mark alive
functionality and the example random failures.
-.. code-block:: c
-
- rte_keepalive_mark_alive(&rte_global_keepalive_info);
- cur_tsc = rte_rdtsc();
-
- /* Die randomly within 7 secs for demo purposes.. */
- if (cur_tsc - tsc_initial > tsc_lifetime)
- break;
+.. literalinclude:: ../../../examples/l2fwd-keepalive/main.c
+ :language: c
+ :start-after: Keepalive heartbeat. 8<
+ :end-before: >8 End of keepalive heartbeat.
+ :dedent: 2
The rte_keepalive_mark_alive function simply sets the core state to alive.
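
For completeness, a hedged sketch of how worker cores are typically tied into the monitor (the function below is illustrative; the example performs the registration in its ``main()``): each lcore is registered once up front and then calls ``rte_keepalive_mark_alive()`` on every iteration of its main loop.

.. code-block:: c

    #include <rte_keepalive.h>
    #include <rte_lcore.h>

    /* Illustrative registration step: a worker lcore must be registered
     * before the monitor pings it and before its mark-alive calls matter. */
    static void
    demo_register_workers(struct rte_keepalive *ka)
    {
        unsigned int lcore_id;

        RTE_LCORE_FOREACH_WORKER(lcore_id)
            rte_keepalive_register_core(ka, lcore_id);
    }
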
@@ -173,21 +173,21 @@ The first task is to initialize the Environment Abstraction Layer (EAL). The
``argc`` and ``argv`` arguments are provided to the ``rte_eal_init()``
function. The value returned is the number of parsed arguments:
-.. code-block:: c
-
- int ret = rte_eal_init(argc, argv);
- if (ret < 0)
- rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
+.. literalinclude:: ../../../examples/l2fwd-cat/l2fwd-cat.c
+ :language: c
+ :start-after: Initialize the Environment Abstraction Layer (EAL). 8<
+ :end-before: >8 End of initializion the Environment Abstraction Layer (EAL).
+ :dedent: 1
The next task is to initialize the PQoS library and configure CAT. The
``argc`` and ``argv`` arguments are provided to the ``cat_init()``
function. The value returned is the number of parsed arguments:
-.. code-block:: c
-
- int ret = cat_init(argc, argv);
- if (ret < 0)
- rte_exit(EXIT_FAILURE, "PQOS: L3CA init failed!\n");
+.. literalinclude:: ../../../examples/l2fwd-cat/l2fwd-cat.c
+ :language: c
+ :start-after: Initialize the PQoS. 8<
+ :end-before: >8 End of initialization of PQoS.
+ :dedent: 1
``cat_init()`` is a wrapper function which parses the command, validates
the requested parameters and configures CAT accordingly.
@@ -249,62 +249,20 @@ is within the structure of each device.
The following code checks if the device supports the specified cipher algorithm
(similar for the authentication algorithm):
-.. code-block:: c
-
- /* Check if device supports cipher algo */
- i = 0;
- opt_cipher_algo = options->cipher_xform.cipher.algo;
- cap = &dev_info.capabilities[i];
- while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) {
- cap_cipher_algo = cap->sym.cipher.algo;
- if (cap->sym.xform_type ==
- RTE_CRYPTO_SYM_XFORM_CIPHER) {
- if (cap_cipher_algo == opt_cipher_algo) {
- if (check_type(options, &dev_info) == 0)
- break;
- }
- }
- cap = &dev_info.capabilities[++i];
- }
+.. literalinclude:: ../../../examples/l2fwd-crypto/main.c
+ :language: c
+ :start-after: Check if device supports cipher algo. 8<
+ :end-before: >8 End of check if device supports cipher algo.
+ :dedent: 2
If a capable crypto device is found, key sizes are checked to see if they are supported
(cipher key and IV for the ciphering):
-.. code-block:: c
-
- /*
- * Check if length of provided cipher key is supported
- * by the algorithm chosen.
- */
- if (options->ckey_param) {
- if (check_supported_size(
- options->cipher_xform.cipher.key.length,
- cap->sym.cipher.key_size.min,
- cap->sym.cipher.key_size.max,
- cap->sym.cipher.key_size.increment)
- != 0) {
- printf("Unsupported cipher key length\n");
- return -1;
- }
- /*
- * Check if length of the cipher key to be randomly generated
- * is supported by the algorithm chosen.
- */
- } else if (options->ckey_random_size != -1) {
- if (check_supported_size(options->ckey_random_size,
- cap->sym.cipher.key_size.min,
- cap->sym.cipher.key_size.max,
- cap->sym.cipher.key_size.increment)
- != 0) {
- printf("Unsupported cipher key length\n");
- return -1;
- }
- options->cipher_xform.cipher.key.length =
- options->ckey_random_size;
- /* No size provided, use minimum size. */
- } else
- options->cipher_xform.cipher.key.length =
- cap->sym.cipher.key_size.min;
+.. literalinclude:: ../../../examples/l2fwd-crypto/main.c
+ :language: c
+ :start-after: Check if capable cipher is supported. 8<
+ :end-before: >8 End of checking if cipher is supported.
+ :dedent: 2
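
As a hedged illustration of what such a size check involves (this is not the example's ``check_supported_size()`` verbatim), a key length is acceptable when it falls inside the advertised ``[min, max]`` range and, when an increment is advertised, lands exactly on one of the advertised steps:

.. code-block:: c

    #include <stdint.h>

    /* Illustrative only: returns 0 when 'length' fits the capability range. */
    static int
    demo_size_supported(uint16_t length, uint16_t min, uint16_t max,
            uint16_t increment)
    {
        if (length < min || length > max)
            return -1;
        /* An increment of 0 means only a single size (min == max) is valid. */
        if (increment == 0)
            return (length == min) ? 0 : -1;
        return ((length - min) % increment == 0) ? 0 : -1;
    }
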
After all the checks, the device is configured and it is added to the
crypto device list.
@@ -322,48 +280,10 @@ pointers to the keys, lengths... etc.
This session is created and is later attached to the crypto operation:
-.. code-block:: c
-
- static struct rte_cryptodev_sym_session *
- initialize_crypto_session(struct l2fwd_crypto_options *options,
- uint8_t cdev_id)
- {
- struct rte_crypto_sym_xform *first_xform;
- struct rte_cryptodev_sym_session *session;
- uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);
- struct rte_mempool *sess_mp = session_pool_socket[socket_id];
-
-
- if (options->xform_chain == L2FWD_CRYPTO_AEAD) {
- first_xform = &options->aead_xform;
- } else if (options->xform_chain == L2FWD_CRYPTO_CIPHER_HASH) {
- first_xform = &options->cipher_xform;
- first_xform->next = &options->auth_xform;
- } else if (options->xform_chain == L2FWD_CRYPTO_HASH_CIPHER) {
- first_xform = &options->auth_xform;
- first_xform->next = &options->cipher_xform;
- } else if (options->xform_chain == L2FWD_CRYPTO_CIPHER_ONLY) {
- first_xform = &options->cipher_xform;
- } else {
- first_xform = &options->auth_xform;
- }
-
- session = rte_cryptodev_sym_session_create(sess_mp);
-
- if (session == NULL)
- return NULL;
-
- if (rte_cryptodev_sym_session_init(cdev_id, session,
- first_xform, sess_mp) < 0)
- return NULL;
-
- return session;
- }
-
- ...
-
- port_cparams[i].session = initialize_crypto_session(options,
- port_cparams[i].dev_id);
+.. literalinclude:: ../../../examples/l2fwd-crypto/main.c
+ :language: c
+ :start-after: Session is created and is later attached to the crypto operation. 8<
+ :end-before: >8 End of creation of session.
Crypto operation creation
~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -371,24 +291,11 @@ Crypto operation creation
Given N packets received from an RX port, N crypto operations are allocated
and filled:
-.. code-block:: c
-
- if (nb_rx) {
- /*
- * If we can't allocate a crypto_ops, then drop
- * the rest of the burst and dequeue and
- * process the packets to free offload structs
- */
- if (rte_crypto_op_bulk_alloc(
- l2fwd_crypto_op_pool,
- RTE_CRYPTO_OP_TYPE_SYMMETRIC,
- ops_burst, nb_rx) !=
- nb_rx) {
- for (j = 0; j < nb_rx; j++)
- rte_pktmbuf_free(pkts_burst[i]);
-
- nb_rx = 0;
- }
+.. literalinclude:: ../../../examples/l2fwd-crypto/main.c
+ :language: c
+ :start-after: Allocate and fillcrypto operations. 8<
+ :end-before: >8 End of crypto operation allocated and filled.
+ :dedent: 3
After filling the crypto operation (including session attachment),
the mbuf which will be transformed is attached to it::
@@ -406,79 +313,22 @@ Before doing so, for performance reasons, the operation stays in a buffer.
When the buffer has enough operations (MAX_PKT_BURST), they are enqueued in the device,
which will perform the operation at that moment:
-.. code-block:: c
-
- static int
- l2fwd_crypto_enqueue(struct rte_crypto_op *op,
- struct l2fwd_crypto_params *cparams)
- {
- unsigned lcore_id, len;
- struct lcore_queue_conf *qconf;
-
- lcore_id = rte_lcore_id();
-
- qconf = &lcore_queue_conf[lcore_id];
- len = qconf->op_buf[cparams->dev_id].len;
- qconf->op_buf[cparams->dev_id].buffer[len] = op;
- len++;
+.. literalinclude:: ../../../examples/l2fwd-crypto/main.c
+ :language: c
+ :start-after: Crypto enqueue. 8<
+ :end-before: >8 End of crypto enqueue.
- /* enough ops to be sent */
- if (len == MAX_PKT_BURST) {
- l2fwd_crypto_send_burst(qconf, MAX_PKT_BURST, cparams);
- len = 0;
- }
-
- qconf->op_buf[cparams->dev_id].len = len;
- return 0;
- }
-
- ...
-
- static int
- l2fwd_crypto_send_burst(struct lcore_queue_conf *qconf, unsigned n,
- struct l2fwd_crypto_params *cparams)
- {
- struct rte_crypto_op **op_buffer;
- unsigned ret;
-
- op_buffer = (struct rte_crypto_op **)
- qconf->op_buf[cparams->dev_id].buffer;
-
- ret = rte_cryptodev_enqueue_burst(cparams->dev_id,
- cparams->qp_id, op_buffer, (uint16_t) n);
-
- crypto_statistics[cparams->dev_id].enqueued += ret;
- if (unlikely(ret < n)) {
- crypto_statistics[cparams->dev_id].errors += (n - ret);
- do {
- rte_pktmbuf_free(op_buffer[ret]->sym->m_src);
- rte_crypto_op_free(op_buffer[ret]);
- } while (++ret < n);
- }
-
- return 0;
- }
+.. literalinclude:: ../../../examples/l2fwd-crypto/main.c
+ :language: c
+ :start-after: l2fwd_crypto_send_burst 8<
+ :end-before: >8 End of l2fwd_crypto_send_burst.
After this, the operations are dequeued from the device, and the transformed mbuf
is extracted from the operation. Then, the operation is freed and the mbuf is
forwarded as it is done in the L2 forwarding application.
-.. code-block:: c
-
- /* Dequeue packets from Crypto device */
- do {
- nb_rx = rte_cryptodev_dequeue_burst(
- cparams->dev_id, cparams->qp_id,
- ops_burst, MAX_PKT_BURST);
-
- crypto_statistics[cparams->dev_id].dequeued +=
- nb_rx;
-
- /* Forward crypto'd packets */
- for (j = 0; j < nb_rx; j++) {
- m = ops_burst[j]->sym->m_src;
-
- rte_crypto_op_free(ops_burst[j]);
- l2fwd_simple_forward(m, portid);
- }
- } while (nb_rx == MAX_PKT_BURST);
+.. literalinclude:: ../../../examples/l2fwd-crypto/main.c
+ :language: c
+ :start-after: Dequeue packets from Crypto device. 8<
+ :end-before: >8 End of dequeue packets from crypto device.
+ :dedent: 3
@@ -131,35 +131,11 @@ EAL arguments are parsed first, then application-specific arguments.
This is done at the beginning of the main() function; eventdev parameters
are parsed in the eventdev_resource_setup() function during eventdev setup:
-.. code-block:: c
-
- /* init EAL */
-
- ret = rte_eal_init(argc, argv);
- if (ret < 0)
- rte_panic("Invalid EAL arguments\n");
-
- argc -= ret;
- argv += ret;
-
- /* parse application arguments (after the EAL ones) */
-
- ret = l2fwd_parse_args(argc, argv);
- if (ret < 0)
- rte_panic("Invalid L2FWD arguments\n");
- .
- .
- .
-
- /* Parse eventdev command line options */
- ret = parse_eventdev_args(argc, argv);
- if (ret < 0)
- return ret;
-
-
-
-
-.. _l2_fwd_event_app_mbuf_init:
+.. literalinclude:: ../../../examples/l2fwd-event/main.c
+ :language: c
+ :start-after: Init EAL. 8<
+ :end-before: >8 End of init EAL.
+ :dedent: 1
Mbuf Pool Initialization
~~~~~~~~~~~~~~~~~~~~~~~~
@@ -168,16 +144,11 @@ Once the arguments are parsed, the mbuf pool is created.
The mbuf pool contains a set of mbuf objects that will be used by the driver
and the application to store network packet data:
-.. code-block:: c
-
- /* create the mbuf pool */
-
- l2fwd_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF,
- MEMPOOL_CACHE_SIZE, 0,
- RTE_MBUF_DEFAULT_BUF_SIZE,
- rte_socket_id());
- if (l2fwd_pktmbuf_pool == NULL)
- rte_panic("Cannot init mbuf pool\n");
+.. literalinclude:: ../../../examples/l2fwd-event/main.c
+ :language: c
+ :start-after: Create the mbuf pool. 8<
+ :end-before: >8 End of creation of mbuf pool.
+ :dedent: 1
The rte_mempool is a generic structure used to handle pools of objects.
In this case, it is necessary to create a pool that will be used by the driver.
@@ -202,50 +173,22 @@ of the driver. To fully understand this code, it is recommended to study the
chapters related to the Poll Mode and Event Mode Driver in the
*DPDK Programmer's Guide* - Rel 1.4 EAR and the *DPDK API Reference*.
-.. code-block:: c
-
- /* reset l2fwd_dst_ports */
-
- for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++)
- l2fwd_dst_ports[portid] = 0;
-
- last_port = 0;
-
- /*
- * Each logical core is assigned a dedicated TX queue on each port.
- */
-
- RTE_ETH_FOREACH_DEV(portid) {
- /* skip ports that are not enabled */
-
- if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
- continue;
-
- if (nb_ports_in_mask % 2) {
- l2fwd_dst_ports[portid] = last_port;
- l2fwd_dst_ports[last_port] = portid;
- }
- else
- last_port = portid;
-
- nb_ports_in_mask++;
-
- rte_eth_dev_info_get((uint8_t) portid, &dev_info);
- }
+.. literalinclude:: ../../../examples/l2fwd-event/main.c
+ :language: c
+ :start-after: Reset l2fwd_dst_ports. 8<
+ :end-before: >8 End of reset l2fwd_dst_ports.
+ :dedent: 1
The next step is to configure the RX and TX queues. For each port, there is only
one RX queue (only one lcore is able to poll a given port). The number of TX
queues depends on the number of available lcores. The rte_eth_dev_configure()
function is used to configure the number of queues for a port:
-.. code-block:: c
-
- ret = rte_eth_dev_configure((uint8_t)portid, 1, 1, &port_conf);
- if (ret < 0)
- rte_panic("Cannot configure device: err=%d, port=%u\n",
- ret, portid);
-
-.. _l2_fwd_event_app_rx_init:
+.. literalinclude:: ../../../examples/l2fwd-event/l2fwd_common.c
+ :language: c
+ :start-after: Configure RX and TX queue. 8<
+ :end-before: >8 End of configuration RX and TX queue.
+ :dedent: 2
RX Queue Initialization
~~~~~~~~~~~~~~~~~~~~~~~
@@ -258,27 +201,19 @@ ports with one lcore. If there are 16 ports on the target (and if the portmask
argument is -p ffff), the application will need four lcores to poll all the
ports.
-.. code-block:: c
-
- ret = rte_eth_rx_queue_setup((uint8_t) portid, 0, nb_rxd, SOCKET0,
- &rx_conf, l2fwd_pktmbuf_pool);
- if (ret < 0)
-
- rte_panic("rte_eth_rx_queue_setup: err=%d, port=%u\n",
- ret, portid);
+.. literalinclude:: ../../../examples/l2fwd-event/l2fwd_common.c
+ :language: c
+ :start-after: Using lcore to poll one or several ports. 8<
+ :end-before: >8 End of using lcore to poll one or several ports.
+ :dedent: 2
The list of queues that must be polled for a given lcore is stored in a private
structure called struct lcore_queue_conf.
-.. code-block:: c
-
- struct lcore_queue_conf {
- unsigned n_rx_port;
- unsigned rx_port_list[MAX_RX_QUEUE_PER_LCORE];
- struct mbuf_table tx_mbufs[L2FWD_MAX_PORTS];
- } rte_cache_aligned;
-
- struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
+.. literalinclude:: ../../../examples/l2fwd/main.c
+ :language: c
+ :start-after: List of queues to be polled for a given lcore. 8<
+ :end-before: >8 End of list of queues to be polled for a given lcore.
The values n_rx_port and rx_port_list[] are used in the main packet processing
loop (see :ref:`l2_fwd_event_app_rx_tx_packets`).
@@ -291,17 +226,11 @@ TX Queue Initialization
Each lcore should be able to transmit on any port. For every port, a single TX
queue is initialized.
-.. code-block:: c
-
- /* init one TX queue on each port */
-
- fflush(stdout);
-
- ret = rte_eth_tx_queue_setup((uint8_t) portid, 0, nb_txd,
- rte_eth_dev_socket_id(portid), &tx_conf);
- if (ret < 0)
- rte_panic("rte_eth_tx_queue_setup:err=%d, port=%u\n",
- ret, (unsigned) portid);
+.. literalinclude:: ../../../examples/l2fwd-event/l2fwd_common.c
+ :language: c
+ :start-after: Init one TX queue on each port. 8<
+ :end-before: >8 End of init one TX queue on each port.
+ :dedent: 2
To configure eventdev support, the application sets up the following components:
@@ -319,37 +248,21 @@ Application can use either H/W or S/W based event device scheduler
implementation and supports a single instance of an event device. It configures the
event device as per the configuration below:
-.. code-block:: c
-
- struct rte_event_dev_config event_d_conf = {
- .nb_event_queues = ethdev_count, /* Dedicated to each Ethernet port */
- .nb_event_ports = num_workers, /* Dedicated to each lcore */
- .nb_events_limit = 4096,
- .nb_event_queue_flows = 1024,
- .nb_event_port_dequeue_depth = 128,
- .nb_event_port_enqueue_depth = 128
- };
-
- ret = rte_event_dev_configure(event_d_id, &event_d_conf);
- if (ret < 0)
- rte_panic("Error in configuring event device\n");
+.. literalinclude:: ../../../examples/l2fwd-event/l2fwd_event_generic.c
+ :language: c
+ :start-after: Configures event device as per below configuration. 8<
+ :end-before: >8 End of configuration event device as per below configuration.
+ :dedent: 1
In case of the S/W scheduler, the application runs the eventdev scheduler service on a
service core. The application retrieves the service id and finds the best possible
service core to run the S/W scheduler.
-.. code-block:: c
-
- rte_event_dev_info_get(evt_rsrc->event_d_id, &evdev_info);
- if (evdev_info.event_dev_cap & RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED) {
- ret = rte_event_dev_service_id_get(evt_rsrc->event_d_id,
- &service_id);
- if (ret != -ESRCH && ret != 0)
- rte_panic("Error in starting eventdev service\n");
- l2fwd_event_service_enable(service_id);
- }
-
-.. _l2_fwd_app_event_queue_init:
+.. literalinclude:: ../../../examples/l2fwd-event/l2fwd_event.c
+ :language: c
+ :start-after: Running eventdev scheduler service on service core. 8<
+ :end-before: >8 End of running eventdev scheduler service on service core.
+ :dedent: 1
Event queue Initialization
~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -357,24 +270,11 @@ Each Ethernet device is assigned a dedicated event queue which will be linked
to all available event ports, i.e. each lcore can dequeue packets from any of the
Ethernet ports.
-.. code-block:: c
-
- struct rte_event_queue_conf event_q_conf = {
- .nb_atomic_flows = 1024,
- .nb_atomic_order_sequences = 1024,
- .event_queue_cfg = 0,
- .schedule_type = RTE_SCHED_TYPE_ATOMIC,
- .priority = RTE_EVENT_DEV_PRIORITY_HIGHEST
- };
-
- /* User requested sched mode */
- event_q_conf.schedule_type = eventq_sched_mode;
- for (event_q_id = 0; event_q_id < ethdev_count; event_q_id++) {
- ret = rte_event_queue_setup(event_d_id, event_q_id,
- &event_q_conf);
- if (ret < 0)
- rte_panic("Error in configuring event queue\n");
- }
+.. literalinclude:: ../../../examples/l2fwd-event/l2fwd_event_generic.c
+ :language: c
+ :start-after: Event queue initialization. 8<
+ :end-before: >8 End of event queue initialization.
+ :dedent: 1
In case of the S/W scheduler, an extra event queue is created which is used by the
Tx adapter service function for the enqueue operation.
@@ -387,45 +287,20 @@ Each worker thread is assigned a dedicated event port for enq/deq operations
to/from an event device. All event ports are linked with all available event
queues.
-.. code-block:: c
-
- struct rte_event_port_conf event_p_conf = {
- .dequeue_depth = 32,
- .enqueue_depth = 32,
- .new_event_threshold = 4096
- };
-
- for (event_p_id = 0; event_p_id < num_workers; event_p_id++) {
- ret = rte_event_port_setup(event_d_id, event_p_id,
- &event_p_conf);
- if (ret < 0)
- rte_panic("Error in configuring event port %d\n", event_p_id);
-
- ret = rte_event_port_link(event_d_id, event_p_id, NULL,
- NULL, 0);
- if (ret < 0)
- rte_panic("Error in linking event port %d to queue\n",
- event_p_id);
- }
+.. literalinclude:: ../../../examples/l2fwd-event/l2fwd_event_generic.c
+ :language: c
+ :start-after: Event port initialization. 8<
+ :end-before: >8 End of event port initialization.
+ :dedent: 1
In case of the S/W scheduler, an extra event port is created by the DPDK library;
it is retrieved by the application and used by the Tx adapter service.
-.. code-block:: c
-
- ret = rte_event_eth_tx_adapter_event_port_get(tx_adptr_id, &tx_port_id);
- if (ret)
- rte_panic("Failed to get Tx adapter port id: %d\n", ret);
-
- ret = rte_event_port_link(event_d_id, tx_port_id,
- &evt_rsrc.evq.event_q_id[
- evt_rsrc.evq.nb_queues - 1],
- NULL, 1);
- if (ret != 1)
- rte_panic("Unable to link Tx adapter port to Tx queue:err=%d\n",
- ret);
-
-.. _l2_fwd_event_app_adapter_init:
+.. literalinclude:: ../../../examples/l2fwd-event/l2fwd_event_generic.c
+ :language: c
+ :start-after: Extra port created. 8<
+ :end-before: >8 End of extra port created.
+ :dedent: 1
Rx/Tx adapter Initialization
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -434,57 +309,11 @@ Ethernet port's Rx queues are connected to its respective event queue at
priority 0 via the Rx adapter configuration, and the Ethernet port's Tx queues are
connected via the Tx adapter.
-.. code-block:: c
-
- RTE_ETH_FOREACH_DEV(port_id) {
- if ((rsrc->enabled_port_mask & (1 << port_id)) == 0)
- continue;
- ret = rte_event_eth_rx_adapter_create(adapter_id, event_d_id,
- &evt_rsrc->def_p_conf);
- if (ret)
- rte_panic("Failed to create rx adapter[%d]\n",
- adapter_id);
-
- /* Configure user requested sched type*/
- eth_q_conf.ev.sched_type = rsrc->sched_type;
- eth_q_conf.ev.queue_id = evt_rsrc->evq.event_q_id[q_id];
- ret = rte_event_eth_rx_adapter_queue_add(adapter_id, port_id,
- -1, ð_q_conf);
- if (ret)
- rte_panic("Failed to add queues to Rx adapter\n");
-
- ret = rte_event_eth_rx_adapter_start(adapter_id);
- if (ret)
- rte_panic("Rx adapter[%d] start Failed\n", adapter_id);
-
- evt_rsrc->rx_adptr.rx_adptr[adapter_id] = adapter_id;
- adapter_id++;
- if (q_id < evt_rsrc->evq.nb_queues)
- q_id++;
- }
-
- adapter_id = 0;
- RTE_ETH_FOREACH_DEV(port_id) {
- if ((rsrc->enabled_port_mask & (1 << port_id)) == 0)
- continue;
- ret = rte_event_eth_tx_adapter_create(adapter_id, event_d_id,
- &evt_rsrc->def_p_conf);
- if (ret)
- rte_panic("Failed to create tx adapter[%d]\n",
- adapter_id);
-
- ret = rte_event_eth_tx_adapter_queue_add(adapter_id, port_id,
- -1);
- if (ret)
- rte_panic("Failed to add queues to Tx adapter\n");
-
- ret = rte_event_eth_tx_adapter_start(adapter_id);
- if (ret)
- rte_panic("Tx adapter[%d] start Failed\n", adapter_id);
-
- evt_rsrc->tx_adptr.tx_adptr[adapter_id] = adapter_id;
- adapter_id++;
- }
+.. literalinclude:: ../../../examples/l2fwd-event/l2fwd_event_internal_port.c
+ :language: c
+ :start-after: Assigned ethernet port. 8<
+ :end-before: >8 End of assigned ethernet port.
+ :dedent: 1
For the S/W scheduler, instead of dedicated adapters, common Rx/Tx adapters are
configured which will be shared among all the Ethernet ports. Also DPDK library
@@ -492,37 +321,11 @@ need service cores to run internal services for Rx/Tx adapters. Application gets
the service id for the Rx/Tx adapters and, after successful setup, runs the services
on dedicated service cores.
-.. code-block:: c
-
- for (i = 0; i < evt_rsrc->rx_adptr.nb_rx_adptr; i++) {
- ret = rte_event_eth_rx_adapter_caps_get(evt_rsrc->event_d_id,
- evt_rsrc->rx_adptr.rx_adptr[i], &caps);
- if (ret < 0)
- rte_panic("Failed to get Rx adapter[%d] caps\n",
- evt_rsrc->rx_adptr.rx_adptr[i]);
- ret = rte_event_eth_rx_adapter_service_id_get(
- evt_rsrc->event_d_id,
- &service_id);
- if (ret != -ESRCH && ret != 0)
- rte_panic("Error in starting Rx adapter[%d] service\n",
- evt_rsrc->rx_adptr.rx_adptr[i]);
- l2fwd_event_service_enable(service_id);
- }
-
- for (i = 0; i < evt_rsrc->tx_adptr.nb_tx_adptr; i++) {
- ret = rte_event_eth_tx_adapter_caps_get(evt_rsrc->event_d_id,
- evt_rsrc->tx_adptr.tx_adptr[i], &caps);
- if (ret < 0)
- rte_panic("Failed to get Rx adapter[%d] caps\n",
- evt_rsrc->tx_adptr.tx_adptr[i]);
- ret = rte_event_eth_tx_adapter_service_id_get(
- evt_rsrc->event_d_id,
- &service_id);
- if (ret != -ESRCH && ret != 0)
- rte_panic("Error in starting Rx adapter[%d] service\n",
- evt_rsrc->tx_adptr.tx_adptr[i]);
- l2fwd_event_service_enable(service_id);
- }
+.. literalinclude:: ../../../examples/l2fwd-event/l2fwd_event.c
+ :language: c
+ :start-after: Gets service ID for RX/TX adapters. 8<
+ :end-before: >8 End of get service ID for RX/TX adapters.
+ :dedent: 1
.. _l2_fwd_event_app_rx_tx_packets:
@@ -532,23 +335,11 @@ Receive, Process and Transmit Packets
In the **l2fwd_main_loop()** function, the main task is to read ingress packets from
the RX queues. This is done using the following code:
-.. code-block:: c
-
- /*
- * Read packet from RX queues
- */
-
- for (i = 0; i < qconf->n_rx_port; i++) {
- portid = qconf->rx_port_list[i];
- nb_rx = rte_eth_rx_burst((uint8_t) portid, 0, pkts_burst,
- MAX_PKT_BURST);
-
- for (j = 0; j < nb_rx; j++) {
- m = pkts_burst[j];
- rte_prefetch0(rte_pktmbuf_mtod(m, void *));
- l2fwd_simple_forward(m, portid);
- }
- }
+.. literalinclude:: ../../../examples/l2fwd-event/l2fwd_poll.c
+ :language: c
+ :start-after: Reading ingress packets. 8<
+ :end-before: >8 End of reading ingress packets.
+ :dedent: 2
Packets are read in a burst of size MAX_PKT_BURST. The rte_eth_rx_burst()
function writes the mbuf pointers in a local table and returns the number of
@@ -570,25 +361,10 @@ Also to optimize enqueue operation, l2fwd_simple_forward() stores incoming mbufs
up to MAX_PKT_BURST. Once it reaches that limit, all packets are transmitted to the
destination ports.
-.. code-block:: c
-
- static void
- l2fwd_simple_forward(struct rte_mbuf *m, uint32_t portid)
- {
- uint32_t dst_port;
- int32_t sent;
- struct rte_eth_dev_tx_buffer *buffer;
-
- dst_port = l2fwd_dst_ports[portid];
-
- if (mac_updating)
- l2fwd_mac_updating(m, dst_port);
-
- buffer = tx_buffer[dst_port];
- sent = rte_eth_tx_buffer(dst_port, 0, buffer, m);
- if (sent)
- port_statistics[dst_port].tx += sent;
- }
+.. literalinclude:: ../../../examples/l2fwd/main.c
+ :language: c
+ :start-after: Simple forward. 8<
+ :end-before: >8 End of simple forward.
For this test application, the processing is exactly the same for all packets
arriving on the same RX port. Therefore, it would have been possible to call
@@ -605,60 +381,20 @@ To ensure that no packets remain in the tables, each lcore does a draining of TX
queue in its main loop. This technique introduces some latency when there are
not many packets to send; however, it improves performance:
-.. code-block:: c
-
- cur_tsc = rte_rdtsc();
-
- /*
- * TX burst queue drain
- */
- diff_tsc = cur_tsc - prev_tsc;
- if (unlikely(diff_tsc > drain_tsc)) {
- for (i = 0; i < qconf->n_rx_port; i++) {
- portid = l2fwd_dst_ports[qconf->rx_port_list[i]];
- buffer = tx_buffer[portid];
- sent = rte_eth_tx_buffer_flush(portid, 0,
- buffer);
- if (sent)
- port_statistics[portid].tx += sent;
- }
-
- /* if timer is enabled */
- if (timer_period > 0) {
- /* advance the timer */
- timer_tsc += diff_tsc;
-
- /* if timer has reached its timeout */
- if (unlikely(timer_tsc >= timer_period)) {
- /* do this only on main core */
- if (lcore_id == rte_get_main_lcore()) {
- print_stats();
- /* reset the timer */
- timer_tsc = 0;
- }
- }
- }
-
- prev_tsc = cur_tsc;
- }
+.. literalinclude:: ../../../examples/l2fwd-event/l2fwd_poll.c
+ :language: c
+ :start-after: Draining TX queue in main loop. 8<
+ :end-before: >8 End of draining TX queue in main loop.
+ :dedent: 2
In the **l2fwd_event_loop()** function, the main task is to read ingress
packets from the event ports. This is done using the following code:
-.. code-block:: c
-
- /* Read packet from eventdev */
- nb_rx = rte_event_dequeue_burst(event_d_id, event_p_id,
- events, deq_len, 0);
- if (nb_rx == 0) {
- rte_pause();
- continue;
- }
-
- for (i = 0; i < nb_rx; i++) {
- mbuf[i] = events[i].mbuf;
- rte_prefetch0(rte_pktmbuf_mtod(mbuf[i], void *));
- }
+.. literalinclude:: ../../../examples/l2fwd-event/l2fwd_event.c
+ :language: c
+ :start-after: Read packet from eventdev. 8<
+ :end-before: >8 End of reading packets from eventdev.
+ :dedent: 2
Before reading packets, deq_len is fetched to ensure the correct allowed dequeue length.
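
A minimal sketch of how that length can be obtained (assuming the standard eventdev port attribute API; the example may cache the value differently, so the helper name below is illustrative):

.. code-block:: c

    #include <rte_eventdev.h>

    /* Illustrative: query the configured dequeue depth of an event port so
     * the dequeue burst never requests more events than the port allows. */
    static uint32_t
    demo_get_deq_len(uint8_t event_d_id, uint8_t event_p_id)
    {
        uint32_t deq_len = 1;

        if (rte_event_port_attr_get(event_d_id, event_p_id,
                RTE_EVENT_PORT_ATTR_DEQ_DEPTH, &deq_len) != 0)
            deq_len = 1; /* fall back to single-event dequeue */
        return deq_len;
    }
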
@@ -682,11 +418,8 @@ l2fwd_eventdev_forward() does not stores incoming mbufs. Packet will forwarded
to destination ports via the Tx adapter or the generic eventdev enqueue API,
depending on whether the H/W or S/W scheduler is used.
-.. code-block:: c
-
- nb_tx = rte_event_eth_tx_adapter_enqueue(event_d_id, port_id, ev,
- nb_rx);
- while (nb_tx < nb_rx && !rsrc->force_quit)
- nb_tx += rte_event_eth_tx_adapter_enqueue(
- event_d_id, port_id,
- ev + nb_tx, nb_rx - nb_tx);
+.. literalinclude:: ../../../examples/l2fwd-event/l2fwd_event.c
+ :language: c
+ :start-after: Read packet from eventdev. 8<
+ :end-before: >8 End of reading packets from eventdev.
+ :dedent: 2
@@ -122,22 +122,11 @@ Refer to the *glibc getopt(3)* man page for details.
EAL arguments are parsed first, then application-specific arguments.
This is done at the beginning of the main() function:
-.. code-block:: c
-
- /* init EAL */
-
- ret = rte_eal_init(argc, argv);
- if (ret < 0)
- rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n");
-
- argc -= ret;
- argv += ret;
-
- /* parse application arguments (after the EAL ones) */
-
- ret = l2fwd_parse_args(argc, argv);
- if (ret < 0)
- rte_exit(EXIT_FAILURE, "Invalid L2FWD arguments\n");
+.. literalinclude:: ../../../examples/l2fwd-jobstats/main.c
+ :language: c
+ :start-after: Init EAL. 8<
+ :end-before: >8 End of init EAL.
+ :dedent: 1
Mbuf Pool Initialization
~~~~~~~~~~~~~~~~~~~~~~~~
@@ -146,15 +135,11 @@ Once the arguments are parsed, the mbuf pool is created.
The mbuf pool contains a set of mbuf objects that will be used by the driver
and the application to store network packet data:
-.. code-block:: c
-
- /* create the mbuf pool */
- l2fwd_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF,
- MEMPOOL_CACHE_SIZE, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
- rte_socket_id());
-
- if (l2fwd_pktmbuf_pool == NULL)
- rte_exit(EXIT_FAILURE, "Cannot init mbuf pool\n");
+.. literalinclude:: ../../../examples/l2fwd-jobstats/main.c
+ :language: c
+ :start-after: Create the mbuf pool. 8<
+ :end-before: >8 End of creation of mbuf pool.
+ :dedent: 1
The rte_mempool is a generic structure used to handle pools of objects.
In this case, it is necessary to create a pool that will be used by the driver.
@@ -176,47 +161,22 @@ The main part of the code in the main() function relates to the initialization o
To fully understand this code, it is recommended to study the chapters related to the Poll Mode Driver
in the *DPDK Programmer's Guide* and the *DPDK API Reference*.
-.. code-block:: c
-
- /* reset l2fwd_dst_ports */
-
- for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++)
- l2fwd_dst_ports[portid] = 0;
-
- last_port = 0;
-
- /*
- * Each logical core is assigned a dedicated TX queue on each port.
- */
- RTE_ETH_FOREACH_DEV(portid) {
- /* skip ports that are not enabled */
- if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
- continue;
-
- if (nb_ports_in_mask % 2) {
- l2fwd_dst_ports[portid] = last_port;
- l2fwd_dst_ports[last_port] = portid;
- }
- else
- last_port = portid;
-
- nb_ports_in_mask++;
-
- rte_eth_dev_info_get((uint8_t) portid, &dev_info);
- }
+.. literalinclude:: ../../../examples/l2fwd-jobstats/main.c
+ :language: c
+ :start-after: Reset l2fwd_dst_ports. 8<
+ :end-before: >8 End of reset l2fwd_dst_ports.
+ :dedent: 1
The next step is to configure the RX and TX queues.
For each port, there is only one RX queue (only one lcore is able to poll a given port).
The number of TX queues depends on the number of available lcores.
The rte_eth_dev_configure() function is used to configure the number of queues for a port:
-.. code-block:: c
-
- ret = rte_eth_dev_configure((uint8_t)portid, 1, 1, &port_conf);
- if (ret < 0)
- rte_exit(EXIT_FAILURE, "Cannot configure device: "
- "err=%d, port=%u\n",
- ret, portid);
+.. literalinclude:: ../../../examples/l2fwd-jobstats/main.c
+ :language: c
+ :start-after: Configure the RX and TX queues. 8<
+ :end-before: >8 End of configuring the RX and TX queues.
+ :dedent: 2
RX Queue Initialization
~~~~~~~~~~~~~~~~~~~~~~~
@@ -228,37 +188,18 @@ For example, if the user specifies -q 4, the application is able to poll four po
If there are 16 ports on the target (and if the portmask argument is -p ffff),
the application will need four lcores to poll all the ports.
-.. code-block:: c
-
- ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
- rte_eth_dev_socket_id(portid),
- NULL,
- l2fwd_pktmbuf_pool);
-
- if (ret < 0)
- rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup:err=%d, port=%u\n",
- ret, (unsigned) portid);
+.. literalinclude:: ../../../examples/l2fwd-jobstats/main.c
+ :language: c
+ :start-after: RX queue initialization. 8<
+ :end-before: >8 End of RX queue initialization.
+ :dedent: 2
The list of queues that must be polled for a given lcore is stored in a private structure called struct lcore_queue_conf.
-.. code-block:: c
-
- struct lcore_queue_conf {
- unsigned n_rx_port;
- unsigned rx_port_list[MAX_RX_QUEUE_PER_LCORE];
- truct mbuf_table tx_mbufs[RTE_MAX_ETHPORTS];
-
- struct rte_timer rx_timers[MAX_RX_QUEUE_PER_LCORE];
- struct rte_jobstats port_fwd_jobs[MAX_RX_QUEUE_PER_LCORE];
-
- struct rte_timer flush_timer;
- struct rte_jobstats flush_job;
- struct rte_jobstats idle_job;
- struct rte_jobstats_context jobs_context;
-
- rte_atomic16_t stats_read_pending;
- rte_spinlock_t lock;
- } __rte_cache_aligned;
+.. literalinclude:: ../../../examples/l2fwd-jobstats/main.c
+ :language: c
+ :start-after: List of queues to be polled for given lcore. 8<
+ :end-before: >8 End of list of queues to be polled for given lcore.
Values of struct lcore_queue_conf:
@@ -276,17 +217,11 @@ TX Queue Initialization
Each lcore should be able to transmit on any port. For every port, a single TX queue is initialized.
-.. code-block:: c
-
- /* init one TX queue on each port */
-
- fflush(stdout);
- ret = rte_eth_tx_queue_setup(portid, 0, nb_txd,
- rte_eth_dev_socket_id(portid),
- NULL);
- if (ret < 0)
- rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup:err=%d, port=%u\n",
- ret, (unsigned) portid);
+.. literalinclude:: ../../../examples/l2fwd-jobstats/main.c
+ :language: c
+ :start-after: Init one TX queue on each port. 8<
+ :end-before: >8 End of init one TX queue on each port.
+ :dedent: 2
Jobs statistics initialization
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -294,35 +229,19 @@ There are several statistics objects available:
* Flush job statistics
-.. code-block:: c
-
- rte_jobstats_init(&qconf->flush_job, "flush", drain_tsc, drain_tsc,
- drain_tsc, 0);
-
- rte_timer_init(&qconf->flush_timer);
- ret = rte_timer_reset(&qconf->flush_timer, drain_tsc, PERIODICAL,
- lcore_id, &l2fwd_flush_job, NULL);
-
- if (ret < 0) {
- rte_exit(1, "Failed to reset flush job timer for lcore %u: %s",
- lcore_id, rte_strerror(-ret));
- }
+.. literalinclude:: ../../../examples/l2fwd-jobstats/main.c
+ :language: c
+ :start-after: Add flush job. 8<
+ :end-before: >8 End of add flush job.
+ :dedent: 2
* Statistics per RX port
-.. code-block:: c
-
- rte_jobstats_init(job, name, 0, drain_tsc, 0, MAX_PKT_BURST);
- rte_jobstats_set_update_period_function(job, l2fwd_job_update_cb);
-
- rte_timer_init(&qconf->rx_timers[i]);
- ret = rte_timer_reset(&qconf->rx_timers[i], 0, PERIODICAL, lcore_id,
- l2fwd_fwd_job, (void *)(uintptr_t)i);
-
- if (ret < 0) {
- rte_exit(1, "Failed to reset lcore %u port %u job timer: %s",
- lcore_id, qconf->rx_port_list[i], rte_strerror(-ret));
- }
+.. literalinclude:: ../../../examples/l2fwd-jobstats/main.c
+ :language: c
+ :start-after: Setup forward job. 8<
+ :end-before: >8 End of forward job.
+ :dedent: 3
The following parameters are passed to rte_jobstats_init():
@@ -338,43 +257,11 @@ Main loop
The forwarding path is reworked compared to the original L2 Forwarding application.
In the l2fwd_main_loop() function, three loops are placed.
-.. code-block:: c
-
- for (;;) {
- rte_spinlock_lock(&qconf->lock);
-
- do {
- rte_jobstats_context_start(&qconf->jobs_context);
-
- /* Do the Idle job:
- * - Read stats_read_pending flag
- * - check if some real job need to be executed
- */
- rte_jobstats_start(&qconf->jobs_context, &qconf->idle_job);
-
- do {
- uint8_t i;
- uint64_t now = rte_get_timer_cycles();
-
- need_manage = qconf->flush_timer.expire < now;
- /* Check if we was esked to give a stats. */
- stats_read_pending =
- rte_atomic16_read(&qconf->stats_read_pending);
- need_manage |= stats_read_pending;
-
- for (i = 0; i < qconf->n_rx_port && !need_manage; i++)
- need_manage = qconf->rx_timers[i].expire < now;
-
- } while (!need_manage);
- rte_jobstats_finish(&qconf->idle_job, qconf->idle_job.target);
-
- rte_timer_manage();
- rte_jobstats_context_finish(&qconf->jobs_context);
- } while (likely(stats_read_pending == 0));
-
- rte_spinlock_unlock(&qconf->lock);
- rte_pause();
- }
+.. literalinclude:: ../../../examples/l2fwd-jobstats/main.c
+ :language: c
+ :start-after: Minimize impact of stats reading. 8<
+ :end-before: >8 End of minimize impact of stats reading.
+ :dedent: 1
The first infinite for loop is there to minimize the impact of stats reading. The lock is only locked/unlocked when asked.
@@ -392,16 +279,11 @@ Receive, Process and Transmit Packets
The main task of the l2fwd_fwd_job() function is to read ingress packets from the RX queue of a particular port and forward them.
This is done using the following code:
-.. code-block:: c
-
- total_nb_rx = rte_eth_rx_burst((uint8_t) portid, 0, pkts_burst,
- MAX_PKT_BURST);
-
- for (j = 0; j < total_nb_rx; j++) {
- m = pkts_burst[j];
- rte_prefetch0(rte_pktmbuf_mtod(m, void *));
- l2fwd_simple_forward(m, portid);
- }
+.. literalinclude:: ../../../examples/l2fwd-jobstats/main.c
+ :language: c
+ :start-after: Call rx burst 2 times. 8<
+ :end-before: >8 End of call rx burst 2 times.
+ :dedent: 1
Packets are read in a burst of size MAX_PKT_BURST.
Then, each mbuf in the table is processed by the l2fwd_simple_forward() function.
@@ -411,29 +293,19 @@ The rte_eth_rx_burst() function writes the mbuf pointers in a local table and re
After the first read, a second try is issued.
-.. code-block:: c
-
- if (total_nb_rx == MAX_PKT_BURST) {
- const uint16_t nb_rx = rte_eth_rx_burst((uint8_t) portid, 0, pkts_burst,
- MAX_PKT_BURST);
-
- total_nb_rx += nb_rx;
- for (j = 0; j < nb_rx; j++) {
- m = pkts_burst[j];
- rte_prefetch0(rte_pktmbuf_mtod(m, void *));
- l2fwd_simple_forward(m, portid);
- }
- }
+.. literalinclude:: ../../../examples/l2fwd-jobstats/main.c
+ :language: c
+ :start-after: Read second try. 8<
+ :end-before: >8 End of read second try.
+ :dedent: 1
This second read is important to give the job stats library feedback on how many packets were processed.
-.. code-block:: c
-
- /* Adjust period time in which we are running here. */
- if (rte_jobstats_finish(job, total_nb_rx) != 0) {
- rte_timer_reset(&qconf->rx_timers[port_idx], job->period, PERIODICAL,
- lcore_id, l2fwd_fwd_job, arg);
- }
+.. literalinclude:: ../../../examples/l2fwd-jobstats/main.c
+ :language: c
+ :start-after: Adjust period time in which we are running here. 8<
+ :end-before: >8 End of adjust period time in which we are running.
+ :dedent: 1
To maximize performance, exactly MAX_PKT_BURST packets (the target value) are expected to be read for each l2fwd_fwd_job() call.
If total_nb_rx is smaller than the target value, job->period will be increased. If it is greater, the period will be decreased.
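
A simplified sketch of that feedback idea (purely illustrative, not the example's actual ``l2fwd_job_update_cb()``): when the measured burst falls short of the target the polling period grows, and when it exceeds the target the period shrinks:

.. code-block:: c

    #include <stdint.h>

    /* Illustrative adjustment of a polling period around a target burst size. */
    static uint64_t
    demo_adjust_period(uint64_t period, int64_t target, int64_t total_nb_rx)
    {
        if (total_nb_rx < target && period < UINT64_MAX / 2)
            period *= 2;   /* fewer packets than expected: poll less often */
        else if (total_nb_rx > target && period > 1)
            period /= 2;   /* more packets than expected: poll more often */
        return period;
    }
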
@@ -446,31 +318,10 @@ During the initialization process, a static array of destination ports (l2fwd_ds
a destination port is assigned that is either the next or previous enabled port from the portmask.
Naturally, the number of ports in the portmask must be even, otherwise, the application exits.
-.. code-block:: c
-
- static void
- l2fwd_simple_forward(struct rte_mbuf *m, unsigned portid)
- {
- struct rte_ether_hdr *eth;
- void *tmp;
- unsigned dst_port;
-
- dst_port = l2fwd_dst_ports[portid];
-
- eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
-
- /* 02:00:00:00:00:xx */
-
- tmp = ð->d_addr.addr_bytes[0];
-
- *((uint64_t *)tmp) = 0x000000000002 + ((uint64_t) dst_port << 40);
-
- /* src addr */
-
- rte_ether_addr_copy(&l2fwd_ports_eth_addr[dst_port], ð->s_addr);
-
- l2fwd_send_packet(m, (uint8_t) dst_port);
- }
+.. literalinclude:: ../../../examples/l2fwd-jobstats/main.c
+ :language: c
+ :start-after: Start of l2fwd_simple_forward. 8<
+ :end-before: >8 End of l2fwd_simple_forward.
Then, the packet is sent using the l2fwd_send_packet(m, dst_port) function.
For this test application, the processing is exactly the same for all packets arriving on the same RX port.
@@ -485,66 +336,17 @@ The application is implemented to illustrate that, so the same approach can be r
The l2fwd_send_packet() function stores the packet in a per-lcore and per-txport table.
If the table is full, the whole packet table is transmitted using the l2fwd_send_burst() function:
-.. code-block:: c
-
- /* Send the packet on an output interface */
-
- static int
- l2fwd_send_packet(struct rte_mbuf *m, uint16_t port)
- {
- unsigned lcore_id, len;
- struct lcore_queue_conf *qconf;
-
- lcore_id = rte_lcore_id();
- qconf = &lcore_queue_conf[lcore_id];
- len = qconf->tx_mbufs[port].len;
- qconf->tx_mbufs[port].m_table[len] = m;
- len++;
-
- /* enough pkts to be sent */
-
- if (unlikely(len == MAX_PKT_BURST)) {
- l2fwd_send_burst(qconf, MAX_PKT_BURST, port);
- len = 0;
- }
-
- qconf->tx_mbufs[port].len = len; return 0;
- }
+.. literalinclude:: ../../../examples/l2fwd-crypto/main.c
+ :language: c
+ :start-after: Enqueue packets for TX and prepare them to be sent. 8<
+ :end-before: >8 End of Enqueuing packets for TX.
To ensure that no packets remain in the tables, a flush job exists. The l2fwd_flush_job()
function is called periodically on each lcore to drain the TX queue of each port.
This technique introduces some latency when there are not many packets to send;
however, it improves performance:
-.. code-block:: c
-
- static void
- l2fwd_flush_job(__rte_unused struct rte_timer *timer, __rte_unused void *arg)
- {
- uint64_t now;
- unsigned lcore_id;
- struct lcore_queue_conf *qconf;
- struct mbuf_table *m_table;
- uint16_t portid;
-
- lcore_id = rte_lcore_id();
- qconf = &lcore_queue_conf[lcore_id];
-
- rte_jobstats_start(&qconf->jobs_context, &qconf->flush_job);
-
- now = rte_get_timer_cycles();
- lcore_id = rte_lcore_id();
- qconf = &lcore_queue_conf[lcore_id];
- for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
- m_table = &qconf->tx_mbufs[portid];
- if (m_table->len == 0 || m_table->next_flush_time <= now)
- continue;
-
- l2fwd_send_burst(qconf, portid);
- }
-
-
- /* Pass target to indicate that this job is happy of time interval
- * in which it was called. */
- rte_jobstats_finish(&qconf->flush_job, qconf->flush_job.target);
- }
+.. literalinclude:: ../../../examples/l2fwd-jobstats/main.c
+ :language: c
+ :start-after: Draining TX queue of each port. 8<
+ :end-before: >8 End of draining TX queue of each port.
@@ -146,22 +146,11 @@ Refer to the *glibc getopt(3)* man page for details.
EAL arguments are parsed first, then application-specific arguments.
This is done at the beginning of the main() function:
-.. code-block:: c
-
- /* init EAL */
-
- ret = rte_eal_init(argc, argv);
- if (ret < 0)
- rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n");
-
- argc -= ret;
- argv += ret;
-
- /* parse application arguments (after the EAL ones) */
-
- ret = l2fwd_parse_args(argc, argv);
- if (ret < 0)
- rte_exit(EXIT_FAILURE, "Invalid L2FWD arguments\n");
+.. literalinclude:: ../../../examples/l2fwd/main.c
+ :language: c
+ :start-after: Init EAL. 8<
+ :end-before: >8 End of init EAL.
+ :dedent: 1
.. _l2_fwd_app_mbuf_init:
@@ -172,16 +161,11 @@ Once the arguments are parsed, the mbuf pool is created.
The mbuf pool contains a set of mbuf objects that will be used by the driver
and the application to store network packet data:
-.. code-block:: c
-
- /* create the mbuf pool */
-
- l2fwd_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF,
- MEMPOOL_CACHE_SIZE, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
- rte_socket_id());
-
- if (l2fwd_pktmbuf_pool == NULL)
- rte_panic("Cannot init mbuf pool\n");
+.. literalinclude:: ../../../examples/l2fwd/main.c
+ :language: c
+ :start-after: Create the mbuf pool. 8<
+ :end-before: >8 End of create the mbuf pool.
+ :dedent: 1
The rte_mempool is a generic structure used to handle pools of objects.
In this case, it is necessary to create a pool that will be used by the driver.
@@ -205,49 +189,22 @@ The main part of the code in the main() function relates to the initialization o
To fully understand this code, it is recommended to study the chapters related to the Poll Mode Driver
in the *DPDK Programmer's Guide* - Rel 1.4 EAR and the *DPDK API Reference*.
-.. code-block:: c
-
- /* reset l2fwd_dst_ports */
-
- for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++)
- l2fwd_dst_ports[portid] = 0;
-
- last_port = 0;
-
- /*
- * Each logical core is assigned a dedicated TX queue on each port.
- */
-
- RTE_ETH_FOREACH_DEV(portid) {
- /* skip ports that are not enabled */
-
- if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
- continue;
-
- if (nb_ports_in_mask % 2) {
- l2fwd_dst_ports[portid] = last_port;
- l2fwd_dst_ports[last_port] = portid;
- }
- else
- last_port = portid;
-
- nb_ports_in_mask++;
-
- rte_eth_dev_info_get((uint8_t) portid, &dev_info);
- }
+.. literalinclude:: ../../../examples/l2fwd/main.c
+ :language: c
+ :start-after: Initialization of the driver. 8<
+ :end-before: >8 End of initialization of the driver.
+ :dedent: 1
The next step is to configure the RX and TX queues.
For each port, there is only one RX queue (only one lcore is able to poll a given port).
The number of TX queues depends on the number of available lcores.
The rte_eth_dev_configure() function is used to configure the number of queues for a port:
-.. code-block:: c
-
- ret = rte_eth_dev_configure((uint8_t)portid, 1, 1, &port_conf);
- if (ret < 0)
- rte_exit(EXIT_FAILURE, "Cannot configure device: "
- "err=%d, port=%u\n",
- ret, portid);
+.. literalinclude:: ../../../examples/l2fwd/main.c
+ :language: c
+ :start-after: Configure the number of queues for a port.
+ :end-before: >8 End of configuration of the number of queues for a port.
+ :dedent: 2
.. _l2_fwd_app_rx_init:
@@ -261,26 +218,18 @@ For example, if the user specifies -q 4, the application is able to poll four po
If there are 16 ports on the target (and if the portmask argument is -p ffff),
the application will need four lcores to poll all the ports.
-.. code-block:: c
-
- ret = rte_eth_rx_queue_setup((uint8_t) portid, 0, nb_rxd, SOCKET0, &rx_conf, l2fwd_pktmbuf_pool);
- if (ret < 0)
-
- rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: "
- "err=%d, port=%u\n",
- ret, portid);
+.. literalinclude:: ../../../examples/l2fwd/main.c
+ :language: c
+ :start-after: RX queue setup. 8<
+ :end-before: >8 End of RX queue setup.
+ :dedent: 2
The list of queues that must be polled for a given lcore is stored in a private structure called struct lcore_queue_conf.
-.. code-block:: c
-
- struct lcore_queue_conf {
- unsigned n_rx_port;
- unsigned rx_port_list[MAX_RX_QUEUE_PER_LCORE];
- struct mbuf_table tx_mbufs[L2FWD_MAX_PORTS];
- } rte_cache_aligned;
-
- struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
+.. literalinclude:: ../../../examples/l2fwd/main.c
+ :language: c
+ :start-after: List of queues to be polled for a given lcore. 8<
+ :end-before: >8 End of list of queues to be polled for a given lcore.
The values n_rx_port and rx_port_list[] are used in the main packet processing loop
(see :ref:`l2_fwd_app_rx_tx_packets`).
@@ -292,28 +241,11 @@ TX Queue Initialization
Each lcore should be able to transmit on any port. For every port, a single TX queue is initialized.
-.. code-block:: c
-
- /* init one TX queue on each port */
-
- fflush(stdout);
-
- ret = rte_eth_tx_queue_setup((uint8_t) portid, 0, nb_txd, rte_eth_dev_socket_id(portid), &tx_conf);
- if (ret < 0)
- rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup:err=%d, port=%u\n", ret, (unsigned) portid);
-
-The global configuration for TX queues is stored in a static structure:
-
-.. code-block:: c
-
- static const struct rte_eth_txconf tx_conf = {
- .tx_thresh = {
- .pthresh = TX_PTHRESH,
- .hthresh = TX_HTHRESH,
- .wthresh = TX_WTHRESH,
- },
- .tx_free_thresh = RTE_TEST_TX_DESC_DEFAULT + 1, /* disable feature */
- };
+.. literalinclude:: ../../../examples/l2fwd/main.c
+ :language: c
+ :start-after: Init one TX queue on each port. 8<
+ :end-before: >8 End of init one TX queue on each port.
+ :dedent: 2
.. _l2_fwd_app_rx_tx_packets:
@@ -323,21 +255,11 @@ Receive, Process and Transmit Packets
In the l2fwd_main_loop() function, the main task is to read ingress packets from the RX queues.
This is done using the following code:
-.. code-block:: c
-
- /*
- * Read packet from RX queues
- */
-
- for (i = 0; i < qconf->n_rx_port; i++) {
- portid = qconf->rx_port_list[i];
- nb_rx = rte_eth_rx_burst((uint8_t) portid, 0, pkts_burst, MAX_PKT_BURST);
-
- for (j = 0; j < nb_rx; j++) {
- m = pkts_burst[j];
- rte_prefetch0[rte_pktmbuf_mtod(m, void *)); l2fwd_simple_forward(m, portid);
- }
- }
+.. literalinclude:: ../../../examples/l2fwd/main.c
+ :language: c
+ :start-after: Read packet from RX queues. 8<
+ :end-before: >8 End of read packet from RX queues.
+ :dedent: 2
Packets are read in a burst of size MAX_PKT_BURST.
The rte_eth_rx_burst() function writes the mbuf pointers in a local table and returns the number of available mbufs in the table.
@@ -354,31 +276,11 @@ During the initialization process, a static array of destination ports (l2fwd_ds
a destination port is assigned that is either the next or previous enabled port from the portmask.
Naturally, the number of ports in the portmask must be even; otherwise, the application exits.
-.. code-block:: c
-
- static void
- l2fwd_simple_forward(struct rte_mbuf *m, unsigned portid)
- {
- struct rte_ether_hdr *eth;
- void *tmp;
- unsigned dst_port;
-
- dst_port = l2fwd_dst_ports[portid];
-
- eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
-
- /* 02:00:00:00:00:xx */
-
- tmp = &eth->d_addr.addr_bytes[0];
-
- *((uint64_t *)tmp) = 0x000000000002 + ((uint64_t) dst_port << 40);
+.. literalinclude:: ../../../examples/l2fwd/main.c
+ :language: c
+ :start-after: Simple forward. 8<
+ :end-before: >8 End of simple forward.
- /* src addr */
-
- rte_ether_addr_copy(&l2fwd_ports_eth_addr[dst_port], &eth->s_addr);
-
- l2fwd_send_packet(m, (uint8_t) dst_port);
- }
Then, the packet is sent using the l2fwd_send_packet(m, dst_port) function.
For this test application, the processing is exactly the same for all packets arriving on the same RX port.
@@ -393,75 +295,17 @@ The application is implemented to illustrate that, so the same approach can be r
The l2fwd_send_packet() function stores the packet in a per-lcore and per-txport table.
If the table is full, the whole table of packets is transmitted using the l2fwd_send_burst() function:
-.. code-block:: c
-
- /* Send the packet on an output interface */
-
- static int
- l2fwd_send_packet(struct rte_mbuf *m, uint16_t port)
- {
- unsigned lcore_id, len;
- struct lcore_queue_conf *qconf;
-
- lcore_id = rte_lcore_id();
- qconf = &lcore_queue_conf[lcore_id];
- len = qconf->tx_mbufs[port].len;
- qconf->tx_mbufs[port].m_table[len] = m;
- len++;
-
- /* enough pkts to be sent */
-
- if (unlikely(len == MAX_PKT_BURST)) {
- l2fwd_send_burst(qconf, MAX_PKT_BURST, port);
- len = 0;
- }
-
- qconf->tx_mbufs[port].len = len; return 0;
- }
+.. literalinclude:: ../../../examples/l2fwd-crypto/main.c
+ :language: c
+ :start-after: Enqueue packets for TX and prepare them to be sent. 8<
+ :end-before: >8 End of Enqueuing packets for TX.
To ensure that no packets remain in the tables, each lcore drains the TX queues in its main loop.
This technique introduces some latency when there are not many packets to send;
however, it improves performance:
-.. code-block:: c
-
- cur_tsc = rte_rdtsc();
-
- /*
- * TX burst queue drain
- */
-
- diff_tsc = cur_tsc - prev_tsc;
-
- if (unlikely(diff_tsc > drain_tsc)) {
- for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
- if (qconf->tx_mbufs[portid].len == 0)
- continue;
-
- l2fwd_send_burst(&lcore_queue_conf[lcore_id], qconf->tx_mbufs[portid].len, (uint8_t) portid);
-
- qconf->tx_mbufs[portid].len = 0;
- }
-
- /* if timer is enabled */
-
- if (timer_period > 0) {
- /* advance the timer */
-
- timer_tsc += diff_tsc;
-
- /* if timer has reached its timeout */
-
- if (unlikely(timer_tsc >= (uint64_t) timer_period)) {
- /* do this only on main core */
- if (lcore_id == rte_get_main_lcore()) {
- print_stats();
-
- /* reset the timer */
- timer_tsc = 0;
- }
- }
- }
-
- prev_tsc = cur_tsc;
- }
+.. literalinclude:: ../../../examples/l2fwd/main.c
+ :language: c
+ :start-after: Drains TX queue in its main loop. 8<
+ :end-before: >8 End of draining TX queue.
+ :dedent: 2
@@ -266,48 +266,10 @@ LPM Initialization
The LPM object is created and loaded with the pre-configured entries read from a global array.
-.. code-block:: c
-
- #if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
-
- static void
- setup_lpm(int socketid)
- {
- unsigned i;
- int ret;
- char s[64];
-
- /* create the LPM table */
-
- snprintf(s, sizeof(s), "IPV4_L3FWD_LPM_%d", socketid);
-
- ipv4_l3fwd_lookup_struct[socketid] = rte_lpm_create(s, socketid, IPV4_L3FWD_LPM_MAX_RULES, 0);
-
- if (ipv4_l3fwd_lookup_struct[socketid] == NULL)
- rte_exit(EXIT_FAILURE, "Unable to create the l3fwd LPM table"
- " on socket %d\n", socketid);
-
- /* populate the LPM table */
-
- for (i = 0; i < IPV4_L3FWD_NUM_ROUTES; i++) {
- /* skip unused ports */
-
- if ((1 << ipv4_l3fwd_route_array[i].if_out & enabled_port_mask) == 0)
- continue;
-
- ret = rte_lpm_add(ipv4_l3fwd_lookup_struct[socketid], ipv4_l3fwd_route_array[i].ip,
- ipv4_l3fwd_route_array[i].depth, ipv4_l3fwd_route_array[i].if_out);
-
- if (ret < 0) {
- rte_exit(EXIT_FAILURE, "Unable to add entry %u to the "
- "l3fwd LPM table on socket %d\n", i, socketid);
- }
-
- printf("LPM: Adding route 0x%08x / %d (%d)\n",
- (unsigned)ipv4_l3fwd_route_array[i].ip, ipv4_l3fwd_route_array[i].depth, ipv4_l3fwd_route_array[i].if_out);
- }
- }
- #endif
+.. literalinclude:: ../../../examples/l3fwd/l3fwd_em.c
+ :language: c
+ :start-after: Initialize exact match (hash) parameters. 8<
+ :end-before: >8 End of initialization of hash parameters.
FIB Initialization
~~~~~~~~~~~~~~~~~~
@@ -319,8 +281,8 @@ the full setup function including the IPv6 setup can be seen in the app code.
.. literalinclude:: ../../../examples/l3fwd/l3fwd_fib.c
:language: c
- :start-after: Function to setup fib.
- :end-before: Create the fib IPv6 table.
+ :start-after: Function to setup fib. 8<
+ :end-before: >8 End of setup fib.
Packet Forwarding for Hash-based Lookups
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -333,28 +295,10 @@ and the packet forwarding decision (that is, the identification of the output in
for hash-based lookups is done by the get_ipv4_dst_port() or get_ipv6_dst_port() function.
The get_ipv4_dst_port() function is shown below:
-.. code-block:: c
-
- static inline uint8_t
- get_ipv4_dst_port(void *ipv4_hdr, uint16_t portid, lookup_struct_t *ipv4_l3fwd_lookup_struct)
- {
- int ret = 0;
- union ipv4_5tuple_host key;
-
- ipv4_hdr = (uint8_t *)ipv4_hdr + offsetof(struct rte_ipv4_hdr, time_to_live);
-
- __m128i data = _mm_loadu_si128((__m128i *)(ipv4_hdr));
-
- /* Get 5 tuple: dst port, src port, dst IP address, src IP address and protocol */
-
- key.xmm = _mm_and_si128(data, mask0);
-
- /* Find destination port */
-
- ret = rte_hash_lookup(ipv4_l3fwd_lookup_struct, (const void *)&key);
-
- return (uint8_t)((ret < 0)? portid : ipv4_l3fwd_out_if[ret]);
- }
+.. literalinclude:: ../../../examples/l3fwd/l3fwd_em.c
+ :language: c
+ :start-after: Performing hash-based lookups. 8<
+ :end-before: >8 End of performing hash-based lookups.
The get_ipv6_dst_port() function is similar to the get_ipv4_dst_port() function.
@@ -402,15 +346,10 @@ For each input packet, the packet forwarding operation is done by the l3fwd_simp
but the packet forwarding decision (that is, the identification of the output interface for the packet)
for LPM-based lookups is done by the get_ipv4_dst_port() function below:
-.. code-block:: c
-
- static inline uint16_t
- get_ipv4_dst_port(struct rte_ipv4_hdr *ipv4_hdr, uint16_t portid, lookup_struct_t *ipv4_l3fwd_lookup_struct)
- {
- uint8_t next_hop;
-
- return ((rte_lpm_lookup(ipv4_l3fwd_lookup_struct, rte_be_to_cpu_32(ipv4_hdr->dst_addr), &next_hop) == 0)? next_hop : portid);
- }
+.. literalinclude:: ../../../examples/l3fwd/l3fwd_lpm.c
+ :language: c
+ :start-after: Performing LPM-based lookups. 8<
+ :end-before: >8 End of performing LPM-based lookups.
Packet Forwarding for FIB-based Lookups
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -132,56 +132,11 @@ These cloned nodes along with existing static nodes such as ``ip4_lookup`` and
``ip4_rewrite`` will be used in graph creation to associate nodes with the
lcore-specific graph object.
-.. code-block:: c
-
- RTE_ETH_FOREACH_DEV(portid)
- {
-
- /* ... */
- ret = rte_eth_dev_configure(portid, nb_rx_queue,
- n_tx_queue, &local_port_conf);
- /* ... */
-
- /* Init one TX queue per couple (lcore,port) */
- queueid = 0;
- for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
- /* ... */
- ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
- socketid, txconf);
- /* ... */
- queueid++;
- }
-
- /* Setup ethdev node config */
- ethdev_conf[nb_conf].port_id = portid;
- ethdev_conf[nb_conf].num_rx_queues = nb_rx_queue;
- ethdev_conf[nb_conf].num_tx_queues = n_tx_queue;
- if (!per_port_pool)
- ethdev_conf[nb_conf].mp = pktmbuf_pool[0];
- else
- ethdev_conf[nb_conf].mp = pktmbuf_pool[portid];
- ethdev_conf[nb_conf].mp_count = NB_SOCKETS;
-
- nb_conf++;
- printf("\n");
- }
-
- for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
- /* Init RX queues */
- for (queue = 0; queue < qconf->n_rx_queue; ++queue) {
- /* ... */
- if (!per_port_pool)
- ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd, socketid,
- &rxq_conf, pktmbuf_pool[0][socketid]);
- else
- ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd, socketid,
- &rxq_conf, pktmbuf_pool[portid][socketid]);
- /* ... */
- }
- }
-
- /* Ethdev node config, skip rx queue mapping */
- ret = rte_node_eth_config(ethdev_conf, nb_conf, nb_graphs);
+.. literalinclude:: ../../../examples/l3fwd-graph/main.c
+ :language: c
+ :start-after: Initialize all ports. 8<
+ :end-before: >8 End of graph creation.
+ :dedent: 1
Graph Initialization
~~~~~~~~~~~~~~~~~~~~
@@ -200,58 +155,11 @@ the application argument ``--config`` specifying rx queue mapping to lcore.
are not sufficient to meet their inter-dependency, or if even one node is not
found with a given regex node pattern.
-.. code-block:: c
-
- static const char *const default_patterns[] = {
- "ip4*",
- "ethdev_tx-*",
- "pkt_drop",
- };
- const char **node_patterns;
- uint16_t nb_pattern;
-
- /* ... */
-
- /* Create a graph object per lcore with common nodes and
- * lcore specific nodes based on application arguments
- */
- nb_patterns = RTE_DIM(default_patterns);
- node_patterns = malloc((MAX_RX_QUEUE_PER_LCORE + nb_patterns) *
- sizeof(*node_patterns));
- memcpy(node_patterns, default_patterns,
- nb_patterns * sizeof(*node_patterns));
-
- memset(&graph_conf, 0, sizeof(graph_conf));
-
- /* Common set of nodes in every lcore's graph object */
- graph_conf.node_patterns = node_patterns;
-
- for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
- /* ... */
-
- /* Skip graph creation if no source exists */
- if (!qconf->n_rx_queue)
- continue;
-
- /* Add rx node patterns of this lcore based on --config */
- for (i = 0; i < qconf->n_rx_queue; i++) {
- graph_conf.node_patterns[nb_patterns + i] =
- qconf->rx_queue_list[i].node_name;
- }
-
- graph_conf.nb_node_patterns = nb_patterns + i;
- graph_conf.socket_id = rte_lcore_to_socket_id(lcore_id);
-
- snprintf(qconf->name, sizeof(qconf->name), "worker_%u", lcore_id);
-
- graph_id = rte_graph_create(qconf->name, &graph_conf);
-
- /* ... */
-
- qconf->graph = rte_graph_lookup(qconf->name);
-
- /* ... */
- }
+.. literalinclude:: ../../../examples/l3fwd-graph/main.c
+ :language: c
+ :start-after: Graph initialization. 8<
+ :end-before: >8 End of graph initialization.
+ :dedent: 1
Forwarding data(Route, Next-Hop) addition
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -267,31 +175,11 @@ headers will be provided run-time using ``rte_node_ip4_route_add()`` and
rewrite data, forwarding data is added before the packet processing loop is
launched on the worker lcore.
-.. code-block:: c
-
- /* Add route to ip4 graph infra */
- for (i = 0; i < IPV4_L3FWD_LPM_NUM_ROUTES; i++) {
- /* ... */
-
- dst_port = ipv4_l3fwd_lpm_route_array[i].if_out;
- next_hop = i;
-
- /* ... */
- ret = rte_node_ip4_route_add(ipv4_l3fwd_lpm_route_array[i].ip,
- ipv4_l3fwd_lpm_route_array[i].depth, next_hop,
- RTE_NODE_IP4_LOOKUP_NEXT_REWRITE);
-
- /* ... */
-
- memcpy(rewrite_data, val_eth + dst_port, rewrite_len);
-
- /* Add next hop for a given destination */
- ret = rte_node_ip4_rewrite_add(next_hop, rewrite_data,
- rewrite_len, dst_port);
-
- RTE_LOG(INFO, L3FWD_GRAPH, "Added route %s, next_hop %u\n",
- route_str, next_hop);
- }
+.. literalinclude:: ../../../examples/l3fwd-graph/main.c
+ :language: c
+ :start-after: Add route to ip4 graph infra. 8<
+ :end-before: >8 End of adding route to ip4 graph infa.
+ :dedent: 1
Packet Forwarding using Graph Walk
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -311,24 +199,7 @@ specific graph object that was already created.
as per next-hop data and transmit the packet via port 'Z' by enqueuing
to ``ethdev_tx-Z`` node instance in its graph object.
-.. code-block:: c
-
- /* Main processing loop */
- static int
- graph_main_loop(void *conf)
- {
- // ...
-
- lcore_id = rte_lcore_id();
- qconf = &lcore_conf[lcore_id];
- graph = qconf->graph;
-
- RTE_LOG(INFO, L3FWD_GRAPH,
- "Entering main loop on lcore %u, graph %s(%p)\n", lcore_id,
- qconf->name, graph);
-
- /* Walk over graph until signal to quit */
- while (likely(!force_quit))
- rte_graph_walk(graph);
- return 0;
- }
+.. literalinclude:: ../../../examples/l3fwd-graph/main.c
+ :language: c
+ :start-after: Main processing loop. 8<
+ :end-before: >8 End of main processing loop.
@@ -134,54 +134,10 @@ responsible for checking if it needs to scale down frequency at run time by chec
Only the power management related initialization is shown.
-.. code-block:: c
-
- int main(int argc, char **argv)
- {
- struct lcore_conf *qconf;
- int ret;
- unsigned nb_ports;
- uint16_t queueid, portid;
- unsigned lcore_id;
- uint64_t hz;
- uint32_t n_tx_queue, nb_lcores;
- uint8_t nb_rx_queue, queue, socketid;
-
- // ...
-
- /* init RTE timer library to be used to initialize per-core timers */
-
- rte_timer_subsystem_init();
-
- // ...
-
-
- /* per-core initialization */
-
- for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
- if (rte_lcore_is_enabled(lcore_id) == 0)
- continue;
-
- /* init power management library for a specified core */
-
- ret = rte_power_init(lcore_id);
- if (ret)
- rte_exit(EXIT_FAILURE, "Power management library "
- "initialization failed on core%d\n", lcore_id);
-
- /* init timer structures for each enabled lcore */
-
- rte_timer_init(&power_timers[lcore_id]);
-
- hz = rte_get_hpet_hz();
-
- rte_timer_reset(&power_timers[lcore_id], hz/TIMER_NUMBER_PER_SECOND, SINGLE, lcore_id, power_timer_cb, NULL);
-
- // ...
- }
-
- // ...
- }
+.. literalinclude:: ../../../examples/l3fwd-power/main.c
+ :language: c
+ :start-after: Power library initialized in the main routine. 8<
+ :end-before: >8 End of power library initialization.
Monitoring Loads of Rx Queues
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -205,109 +161,10 @@ to generate hints based on recent network load trends.
Only power control related code is shown.
-.. code-block:: c
-
- static
- __rte_noreturn int main_loop(__rte_unused void *dummy)
- {
- // ...
-
- while (1) {
- // ...
-
- /**
- * Read packet from RX queues
- */
-
- lcore_scaleup_hint = FREQ_CURRENT;
- lcore_rx_idle_count = 0;
-
- for (i = 0; i < qconf->n_rx_queue; ++i)
- {
- rx_queue = &(qconf->rx_queue_list[i]);
- rx_queue->idle_hint = 0;
- portid = rx_queue->port_id;
- queueid = rx_queue->queue_id;
-
- nb_rx = rte_eth_rx_burst(portid, queueid, pkts_burst, MAX_PKT_BURST);
- stats[lcore_id].nb_rx_processed += nb_rx;
-
- if (unlikely(nb_rx == 0)) {
- /**
- * no packet received from rx queue, try to
- * sleep for a while forcing CPU enter deeper
- * C states.
- */
-
- rx_queue->zero_rx_packet_count++;
-
- if (rx_queue->zero_rx_packet_count <= MIN_ZERO_POLL_COUNT)
- continue;
-
- rx_queue->idle_hint = power_idle_heuristic(rx_queue->zero_rx_packet_count);
- lcore_rx_idle_count++;
- } else {
- rx_ring_length = rte_eth_rx_queue_count(portid, queueid);
-
- rx_queue->zero_rx_packet_count = 0;
-
- /**
- * do not scale up frequency immediately as
- * user to kernel space communication is costly
- * which might impact packet I/O for received
- * packets.
- */
-
- rx_queue->freq_up_hint = power_freq_scaleup_heuristic(lcore_id, rx_ring_length);
- }
-
- /* Prefetch and forward packets */
-
- // ...
- }
-
- if (likely(lcore_rx_idle_count != qconf->n_rx_queue)) {
- for (i = 1, lcore_scaleup_hint = qconf->rx_queue_list[0].freq_up_hint; i < qconf->n_rx_queue; ++i) {
- x_queue = &(qconf->rx_queue_list[i]);
-
- if (rx_queue->freq_up_hint > lcore_scaleup_hint)
-
- lcore_scaleup_hint = rx_queue->freq_up_hint;
- }
-
- if (lcore_scaleup_hint == FREQ_HIGHEST)
-
- rte_power_freq_max(lcore_id);
-
- else if (lcore_scaleup_hint == FREQ_HIGHER)
- rte_power_freq_up(lcore_id);
- } else {
- /**
- * All Rx queues empty in recent consecutive polls,
- * sleep in a conservative manner, meaning sleep as
- * less as possible.
- */
-
- for (i = 1, lcore_idle_hint = qconf->rx_queue_list[0].idle_hint; i < qconf->n_rx_queue; ++i) {
- rx_queue = &(qconf->rx_queue_list[i]);
- if (rx_queue->idle_hint < lcore_idle_hint)
- lcore_idle_hint = rx_queue->idle_hint;
- }
-
- if ( lcore_idle_hint < SLEEP_GEAR1_THRESHOLD)
- /**
- * execute "pause" instruction to avoid context
- * switch for short sleep.
- */
- rte_delay_us(lcore_idle_hint);
- else
- /* long sleep force ruining thread to suspend */
- usleep(lcore_idle_hint);
-
- stats[lcore_id].sleep_time += lcore_idle_hint;
- }
- }
- }
+.. literalinclude:: ../../../examples/l3fwd-power/main.c
+ :language: c
+ :start-after: Main processing loop. 8<
+ :end-before: >8 End of main processing loop.
P-State Heuristic Algorithm
~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -86,56 +86,29 @@ The main part of the code in the main() function relates to the initialization o
To fully understand this code, it is recommended to study the chapters related to the Poll Mode Driver in the
*DPDK Programmer's Guide* and the *DPDK API Reference*.
-.. code-block:: c
-
- /*
- * Each logical core is assigned a dedicated TX queue on each port.
- */
-
- RTE_ETH_FOREACH_DEV(portid) {
- /* skip ports that are not enabled */
-
- if ((lsi_enabled_port_mask & (1 << portid)) == 0)
- continue;
-
- /* save the destination port id */
-
- if (nb_ports_in_mask % 2) {
- lsi_dst_ports[portid] = portid_last;
- lsi_dst_ports[portid_last] = portid;
- }
- else
- portid_last = portid;
-
- nb_ports_in_mask++;
-
- rte_eth_dev_info_get((uint8_t) portid, &dev_info);
- }
+.. literalinclude:: ../../../examples/link_status_interrupt/main.c
+ :language: c
+ :start-after: Each logical core is assigned a dedicated TX queue on each port. 8<
+ :end-before: >8 End of assigning logical core.
+ :dedent: 1
The next step is to configure the RX and TX queues.
For each port, there is only one RX queue (only one lcore is able to poll a given port).
The number of TX queues depends on the number of available lcores.
The rte_eth_dev_configure() function is used to configure the number of queues for a port:
-.. code-block:: c
-
- ret = rte_eth_dev_configure((uint8_t) portid, 1, 1, &port_conf);
- if (ret < 0)
- rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%u\n", ret, portid);
+.. literalinclude:: ../../../examples/link_status_interrupt/main.c
+ :language: c
+ :start-after: Configure RX and TX queues. 8<
+ :end-before: >8 End of configure RX and TX queues.
+ :dedent: 2
The global configuration is stored in a static structure:
-.. code-block:: c
-
- static const struct rte_eth_conf port_conf = {
- .rxmode = {
- .split_hdr_size = 0,
- },
- .txmode = {},
- .intr_conf = {
- .lsc = 1, /**< link status interrupt feature enabled */
- },
- };
+.. literalinclude:: ../../../examples/link_status_interrupt/main.c
+ :language: c
+ :start-after: Global configuration stored in a static structure. 8<
+ :end-before: >8 End of global configuration stored in a static structure.
Configuring lsc to 0 (the default) disables the generation of any link status change interrupts in kernel space,
and no user space interrupt event is received.
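As a minimal sketch (not the application's exact initialization code), enabling the interrupt amounts to setting this flag before configuring the port:

.. code-block:: c

    #include <string.h>
    #include <rte_ethdev.h>

    /* Illustrative only: request LSC interrupts on one port before configuring it. */
    static int
    enable_lsc_interrupt(uint16_t portid)
    {
        struct rte_eth_conf port_conf;

        memset(&port_conf, 0, sizeof(port_conf));
        port_conf.intr_conf.lsc = 1; /* 0 (the default) keeps the port polling-only */

        return rte_eth_dev_configure(portid, 1, 1, &port_conf);
    }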
@@ -151,30 +124,10 @@ Interrupt Callback Registration
The application can register one or more callbacks to a specific port and interrupt event.
An example callback function is shown below.
-.. code-block:: c
-
- static void
- lsi_event_callback(uint16_t port_id, enum rte_eth_event_type type, void *param)
- {
- struct rte_eth_link link;
- int ret;
- char link_status[RTE_ETH_LINK_MAX_STR_LEN];
-
- RTE_SET_USED(param);
-
- printf("\n\nIn registered callback...\n");
-
- printf("Event type: %s\n", type == RTE_ETH_EVENT_INTR_LSC ? "LSC interrupt" : "unknown event");
-
- ret = rte_eth_link_get_nowait(port_id, &link);
- if (ret < 0) {
- printf("Failed to get port %d link status: %s\n\n",
- port_id, rte_strerror(-ret));
- } else {
- rte_eth_link_to_str(link_status, sizeof(link_status), &link);
- printf("Port %d %s\n", port_id, link_status);
- }
- }
+.. literalinclude:: ../../../examples/link_status_interrupt/main.c
+ :language: c
+ :start-after: lsi_event_callback 8<
+ :end-before: >8 End of registering one or more callbacks.
This function is called when a link status interrupt is present for the right port.
The port_id indicates which port the interrupt applies to.
@@ -186,9 +139,11 @@ which is different from the main thread of its caller.
The application registers the lsi_event_callback and a NULL parameter to the link status interrupt event on each port:
-.. code-block:: c
-
- rte_eth_dev_callback_register((uint8_t)portid, RTE_ETH_EVENT_INTR_LSC, lsi_event_callback, NULL);
+.. literalinclude:: ../../../examples/link_status_interrupt/main.c
+ :language: c
+ :start-after: RTE callback register. 8<
+ :end-before: >8 End of registering lsi interrupt callback.
+ :dedent: 2
This registration can be done only after calling the rte_eth_dev_configure() function and before calling any other function.
If lsc is initialized with 0, the callback is never called since no interrupt event would ever be present.
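In outline, the required ordering looks like the sketch below; the descriptor counts and error handling are illustrative, and ``lsi_event_callback`` is the callback shown above:

.. code-block:: c

    #include <rte_ethdev.h>

    /* Sketch of the required call ordering on one port. */
    static int
    setup_port_with_lsc_callback(uint16_t portid, struct rte_eth_conf *port_conf,
                                 struct rte_mempool *mbuf_pool)
    {
        int ret = rte_eth_dev_configure(portid, 1, 1, port_conf);
        if (ret < 0)
            return ret;

        /* Register right after configure, before any other call on this port. */
        ret = rte_eth_dev_callback_register(portid, RTE_ETH_EVENT_INTR_LSC,
                                            lsi_event_callback, NULL);
        if (ret < 0)
            return ret;

        ret = rte_eth_rx_queue_setup(portid, 0, 128,
                                     rte_eth_dev_socket_id(portid), NULL, mbuf_pool);
        if (ret < 0)
            return ret;

        ret = rte_eth_tx_queue_setup(portid, 0, 512,
                                     rte_eth_dev_socket_id(portid), NULL);
        if (ret < 0)
            return ret;

        return rte_eth_dev_start(portid);
    }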
@@ -203,38 +158,28 @@ For example, if the user specifies -q 4, the application is able to poll four po
If there are 16 ports on the target (and if the portmask argument is -p ffff),
the application will need four lcores to poll all the ports.
-.. code-block:: c
-
- ret = rte_eth_rx_queue_setup((uint8_t) portid, 0, nb_rxd, SOCKET0, &rx_conf, lsi_pktmbuf_pool);
- if (ret < 0)
- rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: err=%d, port=%u\n", ret, portid);
+.. literalinclude:: ../../../examples/link_status_interrupt/main.c
+ :language: c
+ :start-after: RX queue initialization. 8<
+ :end-before: >8 End of RX queue initialization.
+ :dedent: 2
The list of queues that must be polled for a given lcore is stored in a private structure called struct lcore_queue_conf.
-.. code-block:: c
-
- struct lcore_queue_conf {
- unsigned n_rx_port;
- unsigned rx_port_list[MAX_RX_QUEUE_PER_LCORE]; unsigned tx_queue_id;
- struct mbuf_table tx_mbufs[LSI_MAX_PORTS];
- } rte_cache_aligned;
-
- struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
+.. literalinclude:: ../../../examples/link_status_interrupt/main.c
+ :language: c
+ :start-after: List of queues must be polled for a give lcore. 8<
+ :end-before: >8 End of list of queues to be polled.
The n_rx_port and rx_port_list[] fields are used in the main packet processing loop
(see `Receive, Process and Transmit Packets`_).
The global configuration for the RX queues is stored in a static structure:
-.. code-block:: c
-
- static const struct rte_eth_rxconf rx_conf = {
- .rx_thresh = {
- .pthresh = RX_PTHRESH,
- .hthresh = RX_HTHRESH,
- .wthresh = RX_WTHRESH,
- },
- };
+.. literalinclude:: ../../../examples/link_status_interrupt/main.c
+ :language: c
+ :start-after: List of queues must be polled for a give lcore. 8<
+ :end-before: >8 End of list of queues to be polled.
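Such a static RX configuration typically looks like the following sketch; the threshold values here are illustrative placeholders rather than the application's values:

.. code-block:: c

    #include <rte_ethdev.h>

    /* Illustrative prefetch, host and write-back threshold values. */
    #define RX_PTHRESH 8
    #define RX_HTHRESH 8
    #define RX_WTHRESH 4

    static const struct rte_eth_rxconf rx_conf = {
        .rx_thresh = {
            .pthresh = RX_PTHRESH,
            .hthresh = RX_HTHRESH,
            .wthresh = RX_WTHRESH,
        },
    };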
TX Queue Initialization
~~~~~~~~~~~~~~~~~~~~~~~
@@ -242,15 +187,11 @@ TX Queue Initialization
Each lcore should be able to transmit on any port.
For every port, a single TX queue is initialized.
-.. code-block:: c
-
- /* init one TX queue logical core on each port */
-
- fflush(stdout);
-
- ret = rte_eth_tx_queue_setup(portid, 0, nb_txd, rte_eth_dev_socket_id(portid), &tx_conf);
- if (ret < 0)
- rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d,port=%u\n", ret, (unsigned) portid);
+.. literalinclude:: ../../../examples/link_status_interrupt/main.c
+ :language: c
+ :start-after: init one TX queue logical core on each port. 8<
+ :end-before: >8 End of init one TX queue.
+ :dedent: 2
The global configuration for TX queues is stored in a static structure:
@@ -271,23 +212,11 @@ Receive, Process and Transmit Packets
In the lsi_main_loop() function, the main task is to read ingress packets from the RX queues.
This is done using the following code:
-.. code-block:: c
-
- /*
- * Read packet from RX queues
- */
-
- for (i = 0; i < qconf->n_rx_port; i++) {
- portid = qconf->rx_port_list[i];
- nb_rx = rte_eth_rx_burst((uint8_t) portid, 0, pkts_burst, MAX_PKT_BURST);
- port_statistics[portid].rx += nb_rx;
-
- for (j = 0; j < nb_rx; j++) {
- m = pkts_burst[j];
- rte_prefetch0(rte_pktmbuf_mtod(m, void *));
- lsi_simple_forward(m, portid);
- }
- }
+.. literalinclude:: ../../../examples/link_status_interrupt/main.c
+ :language: c
+ :start-after: Read packet from RX queues. 8<
+ :end-before: >8 End of reading packet from RX queues.
+ :dedent: 2
Packets are read in a burst of size MAX_PKT_BURST.
The rte_eth_rx_burst() function writes the mbuf pointers in a local table and returns the number of available mbufs in the table.
@@ -302,28 +231,10 @@ The processing is very simple: processes the TX port from the RX port and then r
If portId is odd, the first line subtracts one and the second line does nothing.
Therefore, 0 goes to 1, and 1 to 0, 2 goes to 3 and 3 to 2, and so on.
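An equivalent way to express this even/odd pairing, shown here only as a sketch and not as the application's literal code:

.. code-block:: c

    /* 0 <-> 1, 2 <-> 3, ...: toggle the least significant bit of the port id. */
    static inline unsigned int
    paired_port(unsigned int portid)
    {
        return portid ^ 1;
    }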
-.. code-block:: c
-
- static void
- lsi_simple_forward(struct rte_mbuf *m, unsigned portid)
- {
- struct rte_ether_hdr *eth;
- void *tmp;
- unsigned dst_port = lsi_dst_ports[portid];
-
- eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
-
- /* 02:00:00:00:00:xx */
-
- tmp = &eth->d_addr.addr_bytes[0];
-
- *((uint64_t *)tmp) = 0x000000000002 + (dst_port << 40);
-
- /* src addr */
- rte_ether_addr_copy(&lsi_ports_eth_addr[dst_port], &eth->s_addr);
-
- lsi_send_packet(m, dst_port);
- }
+.. literalinclude:: ../../../examples/link_status_interrupt/main.c
+ :language: c
+ :start-after: Replacing the source and destination MAC addresses. 8<
+ :end-before: >8 End of replacing the source and destination MAC addresses.
Then, the packet is sent using the lsi_send_packet(m, dst_port) function.
For this test application, the processing is exactly the same for all packets arriving on the same RX port.
@@ -338,77 +249,17 @@ The application is implemented to illustrate that so the same approach can be re
The lsi_send_packet() function stores the packet in a per-lcore and per-txport table.
If the table is full, the whole table of packets is transmitted using the lsi_send_burst() function:
-.. code-block:: c
-
- /* Send the packet on an output interface */
-
- static int
- lsi_send_packet(struct rte_mbuf *m, uint16_t port)
- {
- unsigned lcore_id, len;
- struct lcore_queue_conf *qconf;
-
- lcore_id = rte_lcore_id();
- qconf = &lcore_queue_conf[lcore_id];
- len = qconf->tx_mbufs[port].len;
- qconf->tx_mbufs[port].m_table[len] = m;
- len++;
-
- /* enough pkts to be sent */
-
- if (unlikely(len == MAX_PKT_BURST)) {
- lsi_send_burst(qconf, MAX_PKT_BURST, port);
- len = 0;
- }
- qconf->tx_mbufs[port].len = len;
-
- return 0;
- }
+.. literalinclude:: ../../../examples/l2fwd-crypto/main.c
+ :language: c
+ :start-after: Enqueue packets for TX and prepare them to be sent. 8<
+ :end-before: >8 End of Enqueuing packets for TX.
To ensure that no packets remain in the tables, each lcore drains the TX queue in its main loop.
This technique introduces some latency when there are not many packets to send.
However, it improves performance:
-.. code-block:: c
-
- cur_tsc = rte_rdtsc();
-
- /*
- * TX burst queue drain
- */
-
- diff_tsc = cur_tsc - prev_tsc;
-
- if (unlikely(diff_tsc > drain_tsc)) {
- /* this could be optimized (use queueid instead of * portid), but it is not called so often */
-
- for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
- if (qconf->tx_mbufs[portid].len == 0)
- continue;
-
- lsi_send_burst(&lcore_queue_conf[lcore_id],
- qconf->tx_mbufs[portid].len, (uint8_t) portid);
- qconf->tx_mbufs[portid].len = 0;
- }
-
- /* if timer is enabled */
-
- if (timer_period > 0) {
- /* advance the timer */
-
- timer_tsc += diff_tsc;
-
- /* if timer has reached its timeout */
-
- if (unlikely(timer_tsc >= (uint64_t) timer_period)) {
- /* do this only on main core */
- if (lcore_id == rte_get_main_lcore()) {
- print_stats();
-
- /* reset the timer */
- timer_tsc = 0;
- }
- }
- }
- prev_tsc = cur_tsc;
- }
+.. literalinclude:: ../../../examples/link_status_interrupt/main.c
+ :language: c
+ :start-after: Draining TX queue in its main loop. 8<
+ :end-before: >8 End of draining TX queue in its main loop.
+ :dedent: 2
@@ -113,17 +113,11 @@ These three objects are created at startup by the primary process,
since the secondary process cannot create objects in memory as it cannot reserve memory zones,
and the secondary process then uses lookup functions to attach to these objects as it starts up.
-.. code-block:: c
-
- if (rte_eal_process_type() == RTE_PROC_PRIMARY){
- send_ring = rte_ring_create(_PRI_2_SEC, ring_size, SOCKET0, flags);
- recv_ring = rte_ring_create(_SEC_2_PRI, ring_size, SOCKET0, flags);
- message_pool = rte_mempool_create(_MSG_POOL, pool_size, string_size, pool_cache, priv_data_sz, NULL, NULL, NULL, NULL, SOCKET0, flags);
- } else {
- recv_ring = rte_ring_lookup(_PRI_2_SEC);
- send_ring = rte_ring_lookup(_SEC_2_PRI);
- message_pool = rte_mempool_lookup(_MSG_POOL);
- }
+.. literalinclude:: ../../../examples/multi_process/simple_mp/main.c
+ :language: c
+ :start-after: Start of ring structure. 8<
+ :end-before: >8 End of ring structure.
+ :dedent: 1
Note, however, that the named ring structure used as send_ring in the primary process is the recv_ring in the secondary process.
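A minimal sketch of this naming relationship (the literal ring name and size are illustrative):

.. code-block:: c

    #include <rte_lcore.h>
    #include <rte_ring.h>

    /* Primary side: create the ring and use it as the send direction. */
    static struct rte_ring *
    primary_send_ring(void)
    {
        return rte_ring_create("PRI_2_SEC", 64, rte_socket_id(), 0);
    }

    /* Secondary side: the very same ring, found by name, is its receive direction. */
    static struct rte_ring *
    secondary_recv_ring(void)
    {
        return rte_ring_lookup("PRI_2_SEC");
    }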
@@ -216,16 +210,11 @@ the number of RX and TX queues per port being determined by the num-procs parame
The structures for the initialized network ports are stored in shared memory and
therefore will be accessible by the secondary process as it initializes.
-.. code-block:: c
-
- if (num_ports & 1)
- rte_exit(EXIT_FAILURE, "Application must use an even number of ports\n");
-
- for(i = 0; i < num_ports; i++){
- if(proc_type == RTE_PROC_PRIMARY)
- if (smp_port_init(ports[i], mp, (uint16_t)num_procs) < 0)
- rte_exit(EXIT_FAILURE, "Error initializing ports\n");
- }
+.. literalinclude:: ../../../examples/multi_process/symmetric_mp/main.c
+ :language: c
+ :start-after: Primary instance initialized. 8<
+ :end-before: >8 End of primary instance initialization.
+ :dedent: 1
In the secondary instance, rather than initializing the network ports, the port information exported by the primary process is used,
giving the secondary process access to the hardware and software rings for each network port.
@@ -95,30 +95,28 @@ The first task is to initialize the Environment Abstraction Layer (EAL). The
``argc`` and ``argv`` arguments are provided to the ``rte_eal_init()``
function. The value returned is the number of parsed arguments:
-.. code-block:: c
-
- int ret = rte_eal_init(argc, argv);
- if (ret < 0)
- rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
+.. literalinclude:: ../../../examples/ptpclient/ptpclient.c
+ :language: c
+ :start-after: Initialize the Environment Abstraction Layer (EAL). 8<
+ :end-before: >8 End of initialization of EAL.
+ :dedent: 1
And then we parse the application-specific arguments:
-.. code-block:: c
-
- argc -= ret;
- argv += ret;
-
- ret = ptp_parse_args(argc, argv);
- if (ret < 0)
- rte_exit(EXIT_FAILURE, "Error with PTP initialization\n");
+.. literalinclude:: ../../../examples/ptpclient/ptpclient.c
+ :language: c
+ :start-after: Parse specific arguments. 8<
+ :end-before: >8 End of parsing specific arguments.
+ :dedent: 1
The ``main()`` also allocates a mempool to hold the mbufs (Message Buffers)
used by the application:
-.. code-block:: c
-
- mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", NUM_MBUFS * nb_ports,
- MBUF_CACHE_SIZE, 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
+.. literalinclude:: ../../../examples/ptpclient/ptpclient.c
+ :language: c
+ :start-after: Creates a new mempool in memory to hold the mbufs. 8<
+ :end-before: >8 End of a new mempool in memory to hold the mbufs.
+ :dedent: 1
Mbufs are the packet buffer structure used by DPDK. They are explained in
detail in the "Mbuf Library" section of the *DPDK Programmer's Guide*.
@@ -126,19 +124,11 @@ detail in the "Mbuf Library" section of the *DPDK Programmer's Guide*.
The ``main()`` function also initializes all the ports using the user-defined
``port_init()`` function with the portmask provided by the user:
-.. code-block:: c
-
- for (portid = 0; portid < nb_ports; portid++)
- if ((ptp_enabled_port_mask & (1 << portid)) != 0) {
-
- if (port_init(portid, mbuf_pool) == 0) {
- ptp_enabled_ports[ptp_enabled_port_nb] = portid;
- ptp_enabled_port_nb++;
- } else {
- rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu8 "\n",
- portid);
- }
- }
+.. literalinclude:: ../../../examples/ptpclient/ptpclient.c
+ :language: c
+ :start-after: Initialize all ports. 8<
+ :end-before: >8 End of initialization of all ports.
+ :dedent: 1
Once the initialization is complete, the application is ready to launch a
@@ -160,21 +150,11 @@ available lcores.
The main work of the application is done within the loop:
-.. code-block:: c
-
- for (portid = 0; portid < ptp_enabled_port_nb; portid++) {
-
- portid = ptp_enabled_ports[portid];
- nb_rx = rte_eth_rx_burst(portid, 0, &m, 1);
-
- if (likely(nb_rx == 0))
- continue;
-
- if (m->ol_flags & PKT_RX_IEEE1588_PTP)
- parse_ptp_frames(portid, m);
-
- rte_pktmbuf_free(m);
- }
+.. literalinclude:: ../../../examples/ptpclient/ptpclient.c
+ :language: c
+ :start-after: Read packet from RX queues. 8<
+ :end-before: >8 End of read packets from RX queues.
+ :dedent: 2
Packets are received one by one on the RX ports and, if required, PTP response
packets are transmitted on the TX ports.
@@ -182,10 +162,11 @@ packets are transmitted on the TX ports.
If the offload flags in the mbuf indicate that the packet is a PTP packet then
the packet is parsed to determine which type:
-.. code-block:: c
-
- if (m->ol_flags & PKT_RX_IEEE1588_PTP)
- parse_ptp_frames(portid, m);
+.. literalinclude:: ../../../examples/ptpclient/ptpclient.c
+ :language: c
+ :start-after: Packet is parsed to determine which type. 8<
+ :end-before: >8 End of packet is parsed to determine which type.
+ :dedent: 3
All packets are freed explicitly using ``rte_pktmbuf_free()``.
@@ -200,39 +181,10 @@ PTP parsing
The ``parse_ptp_frames()`` function processes PTP packets, implementing slave
PTP IEEE1588 L2 functionality.
-.. code-block:: c
-
- void
- parse_ptp_frames(uint16_t portid, struct rte_mbuf *m) {
- struct ptp_header *ptp_hdr;
- struct rte_ether_hdr *eth_hdr;
- uint16_t eth_type;
-
- eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
- eth_type = rte_be_to_cpu_16(eth_hdr->ether_type);
-
- if (eth_type == PTP_PROTOCOL) {
- ptp_data.m = m;
- ptp_data.portid = portid;
- ptp_hdr = (struct ptp_header *)(rte_pktmbuf_mtod(m, char *)
- + sizeof(struct rte_ether_hdr));
-
- switch (ptp_hdr->msgtype) {
- case SYNC:
- parse_sync(&ptp_data);
- break;
- case FOLLOW_UP:
- parse_fup(&ptp_data);
- break;
- case DELAY_RESP:
- parse_drsp(&ptp_data);
- print_clock_info(&ptp_data);
- break;
- default:
- break;
- }
- }
- }
+.. literalinclude:: ../../../examples/ptpclient/ptpclient.c
+ :language: c
+ :start-after: Parse ptp frames. 8<
+ :end-before: >8 End of function processes PTP packets.
There are 3 types of packets on the RX path which we must parse to create a minimal
implementation of the PTP slave client:
@@ -61,34 +61,20 @@ Explanation
Selecting one of the metering modes is done with these defines:
-.. code-block:: c
-
- #define APP_MODE_FWD 0
- #define APP_MODE_SRTCM_COLOR_BLIND 1
- #define APP_MODE_SRTCM_COLOR_AWARE 2
- #define APP_MODE_TRTCM_COLOR_BLIND 3
- #define APP_MODE_TRTCM_COLOR_AWARE 4
-
- #define APP_MODE APP_MODE_SRTCM_COLOR_BLIND
+.. literalinclude:: ../../../examples/qos_meter/main.c
+ :language: c
+ :start-after: Traffic metering configuration. 8<
+ :end-before: >8 End of traffic metering configuration.
To simplify debugging (for example, by using the traffic generator RX side MAC address based packet filtering feature),
the color is defined as the least significant byte of the destination MAC address.
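A rough sketch of such a marking is shown below; the ``d_addr`` field name follows the older Ethernet header layout used elsewhere in this guide and is an assumption here:

.. code-block:: c

    #include <rte_ether.h>
    #include <rte_mbuf.h>

    /* Store the meter color in the last byte of the destination MAC address. */
    static void
    mark_pkt_color(struct rte_mbuf *m, uint8_t color)
    {
        struct rte_ether_hdr *eth =
            rte_pktmbuf_mtod(m, struct rte_ether_hdr *);

        eth->d_addr.addr_bytes[RTE_ETHER_ADDR_LEN - 1] = color;
    }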
The traffic meter parameters are configured in the application source code with the following default values:
-.. code-block:: c
-
- struct rte_meter_srtcm_params app_srtcm_params[] = {
-
- {.cir = 1000000 * 46, .cbs = 2048, .ebs = 2048},
-
- };
-
- struct rte_meter_trtcm_params app_trtcm_params[] = {
-
- {.cir = 1000000 * 46, .pir = 1500000 * 46, .cbs = 2048, .pbs = 2048},
-
- };
+.. literalinclude:: ../../../examples/qos_meter/main.c
+ :language: c
+ :start-after: Traffic meter parameters are configured in the application. 8<
+ :end-before: >8 End of traffic meter parameters are configured in the application.
Assuming the input traffic is generated at line rate and all packets are 64-byte Ethernet frames (IPv4 packet size of 46 bytes)
and green, the expected output traffic should be marked as shown in the following table:
@@ -120,14 +106,10 @@ and green, the expected output traffic should be marked as shown in the followin
To set up the policing scheme as desired, it is necessary to modify the main.h source file,
where this policy is implemented as a static structure, as follows:
-.. code-block:: c
-
- int policer_table[e_RTE_METER_COLORS][e_RTE_METER_COLORS] =
- {
- { GREEN, RED, RED},
- { DROP, YELLOW, RED},
- { DROP, DROP, RED}
- };
+.. literalinclude:: ../../../examples/qos_meter/main.h
+ :language: c
+ :start-after: Policy implemented as a static structure. 8<
+ :end-before: >8 End of policy implemented as a static structure.
Where rows indicate the input color, columns indicate the output color,
and the value that is stored in the table indicates the action to be taken for that particular case.
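For example, applying the policy to a single packet reduces to one table lookup, as in the sketch below (types simplified to plain integers; ``policer_table`` is the static structure above):

.. code-block:: c

    /* Sketch: pick the action for one packet from the static policy above.
     * The returned value is either DROP or the color to re-mark the packet with. */
    static inline int
    policer_action(int input_color, int output_color)
    {
        return policer_table[input_color][output_color];
    }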
@@ -123,147 +123,8 @@ needed for the QoS scheduler configuration.
The profile file has the following format:
-::
-
- ; port configuration [port]
-
- frame overhead = 24
- number of subports per port = 1
-
- ; Subport configuration
-
- [subport 0]
- number of pipes per subport = 4096
- queue sizes = 64 64 64 64 64 64 64 64 64 64 64 64 64
-
- subport 0-8 = 0 ; These subports are configured with subport profile 0
-
- [subport profile 0]
- tb rate = 1250000000; Bytes per second
- tb size = 1000000; Bytes
- tc 0 rate = 1250000000; Bytes per second
- tc 1 rate = 1250000000; Bytes per second
- tc 2 rate = 1250000000; Bytes per second
- tc 3 rate = 1250000000; Bytes per second
- tc 4 rate = 1250000000; Bytes per second
- tc 5 rate = 1250000000; Bytes per second
- tc 6 rate = 1250000000; Bytes per second
- tc 7 rate = 1250000000; Bytes per second
- tc 8 rate = 1250000000; Bytes per second
- tc 9 rate = 1250000000; Bytes per second
- tc 10 rate = 1250000000; Bytes per second
- tc 11 rate = 1250000000; Bytes per second
- tc 12 rate = 1250000000; Bytes per second
-
- tc period = 10; Milliseconds
- tc oversubscription period = 10; Milliseconds
-
- pipe 0-4095 = 0; These pipes are configured with pipe profile 0
-
- ; Pipe configuration
-
- [pipe profile 0]
- tb rate = 305175; Bytes per second
- tb size = 1000000; Bytes
-
- tc 0 rate = 305175; Bytes per second
- tc 1 rate = 305175; Bytes per second
- tc 2 rate = 305175; Bytes per second
- tc 3 rate = 305175; Bytes per second
- tc 4 rate = 305175; Bytes per second
- tc 5 rate = 305175; Bytes per second
- tc 6 rate = 305175; Bytes per second
- tc 7 rate = 305175; Bytes per second
- tc 8 rate = 305175; Bytes per second
- tc 9 rate = 305175; Bytes per second
- tc 10 rate = 305175; Bytes per second
- tc 11 rate = 305175; Bytes per second
- tc 12 rate = 305175; Bytes per second
- tc period = 40; Milliseconds
-
- tc 0 oversubscription weight = 1
- tc 1 oversubscription weight = 1
- tc 2 oversubscription weight = 1
- tc 3 oversubscription weight = 1
- tc 4 oversubscription weight = 1
- tc 5 oversubscription weight = 1
- tc 6 oversubscription weight = 1
- tc 7 oversubscription weight = 1
- tc 8 oversubscription weight = 1
- tc 9 oversubscription weight = 1
- tc 10 oversubscription weight = 1
- tc 11 oversubscription weight = 1
- tc 12 oversubscription weight = 1
-
- tc 12 wrr weights = 1 1 1 1
-
- ; RED params per traffic class and color (Green / Yellow / Red)
-
- [red]
- tc 0 wred min = 48 40 32
- tc 0 wred max = 64 64 64
- tc 0 wred inv prob = 10 10 10
- tc 0 wred weight = 9 9 9
-
- tc 1 wred min = 48 40 32
- tc 1 wred max = 64 64 64
- tc 1 wred inv prob = 10 10 10
- tc 1 wred weight = 9 9 9
-
- tc 2 wred min = 48 40 32
- tc 2 wred max = 64 64 64
- tc 2 wred inv prob = 10 10 10
- tc 2 wred weight = 9 9 9
-
- tc 3 wred min = 48 40 32
- tc 3 wred max = 64 64 64
- tc 3 wred inv prob = 10 10 10
- tc 3 wred weight = 9 9 9
-
- tc 4 wred min = 48 40 32
- tc 4 wred max = 64 64 64
- tc 4 wred inv prob = 10 10 10
- tc 4 wred weight = 9 9 9
-
- tc 5 wred min = 48 40 32
- tc 5 wred max = 64 64 64
- tc 5 wred inv prob = 10 10 10
- tc 5 wred weight = 9 9 9
-
- tc 6 wred min = 48 40 32
- tc 6 wred max = 64 64 64
- tc 6 wred inv prob = 10 10 10
- tc 6 wred weight = 9 9 9
-
- tc 7 wred min = 48 40 32
- tc 7 wred max = 64 64 64
- tc 7 wred inv prob = 10 10 10
- tc 7 wred weight = 9 9 9
-
- tc 8 wred min = 48 40 32
- tc 8 wred max = 64 64 64
- tc 8 wred inv prob = 10 10 10
- tc 8 wred weight = 9 9 9
-
- tc 9 wred min = 48 40 32
- tc 9 wred max = 64 64 64
- tc 9 wred inv prob = 10 10 10
- tc 9 wred weight = 9 9 9
-
- tc 10 wred min = 48 40 32
- tc 10 wred max = 64 64 64
- tc 10 wred inv prob = 10 10 10
- tc 10 wred weight = 9 9 9
-
- tc 11 wred min = 48 40 32
- tc 11 wred max = 64 64 64
- tc 11 wred inv prob = 10 10 10
- tc 11 wred weight = 9 9 9
-
- tc 12 wred min = 48 40 32
- tc 12 wred max = 64 64 64
- tc 12 wred inv prob = 10 10 10
- tc 12 wred weight = 9 9 9
+.. literalinclude:: ../../../examples/qos_sched/profile.cfg
+ :start-after: Data Plane Development Kit (DPDK) Programmer's Guide
Interactive mode
~~~~~~~~~~~~~~~~
@@ -72,62 +72,19 @@ The Port Initialization Function
The main functional part of the port initialization is shown below with
comments:
-.. code-block:: c
-
- static inline int
- port_init(uint16_t port, struct rte_mempool *mbuf_pool)
- {
- struct rte_eth_conf port_conf = port_conf_default;
- const uint16_t rx_rings = 1, tx_rings = 1;
- struct rte_ether_addr addr;
- int retval;
- uint16_t q;
-
- /* Configure the Ethernet device. */
- retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
- if (retval != 0)
- return retval;
-
- /* Allocate and set up 1 RX queue per Ethernet port. */
- for (q = 0; q < rx_rings; q++) {
- retval = rte_eth_rx_queue_setup(port, q, RX_RING_SIZE,
- rte_eth_dev_socket_id(port), NULL, mbuf_pool);
- if (retval < 0)
- return retval;
- }
-
- /* Allocate and set up 1 TX queue per Ethernet port. */
- for (q = 0; q < tx_rings; q++) {
- retval = rte_eth_tx_queue_setup(port, q, TX_RING_SIZE,
- rte_eth_dev_socket_id(port), NULL);
- if (retval < 0)
- return retval;
- }
-
- /* Start the Ethernet port. */
- retval = rte_eth_dev_start(port);
- if (retval < 0)
- return retval;
-
- /* Enable RX in promiscuous mode for the Ethernet device. */
- retval = rte_eth_promiscuous_enable(port);
- if (retval != 0)
- return retval;
-
- /* Add the callbacks for RX and TX.*/
- rte_eth_add_rx_callback(port, 0, add_timestamps, NULL);
- rte_eth_add_tx_callback(port, 0, calc_latency, NULL);
-
- return 0;
- }
+.. literalinclude:: ../../../examples/rxtx_callbacks/main.c
+ :language: c
+ :start-after: Port initialization. 8<
+ :end-before: >8 End of port initialization.
The RX and TX callbacks are added to the ports/queues as function pointers:
-.. code-block:: c
-
- rte_eth_add_rx_callback(port, 0, add_timestamps, NULL);
- rte_eth_add_tx_callback(port, 0, calc_latency, NULL);
+.. literalinclude:: ../../../examples/rxtx_callbacks/main.c
+ :language: c
+ :start-after: RX and TX callbacks are added to the ports. 8<
+ :end-before: >8 End of RX and TX callbacks.
+ :dedent: 1
More than one callback can be added and additional information can be passed
to callback function pointers as a ``void*``. In the examples above ``NULL``
@@ -142,20 +99,10 @@ The add_timestamps() Callback
The ``add_timestamps()`` callback is added to the RX port and is applied to
all packets received:
-.. code-block:: c
-
- static uint16_t
- add_timestamps(uint16_t port __rte_unused, uint16_t qidx __rte_unused,
- struct rte_mbuf **pkts, uint16_t nb_pkts, void *_ __rte_unused)
- {
- unsigned i;
- uint64_t now = rte_rdtsc();
-
- for (i = 0; i < nb_pkts; i++)
- *tsc_field(pkts[i]) = now;
-
- return nb_pkts;
- }
+.. literalinclude:: ../../../examples/rxtx_callbacks/main.c
+ :language: c
+ :start-after: Callback added to the RX port and applied to packets. 8<
+ :end-before: >8 End of callback addition and application.
The DPDK function ``rte_rdtsc()`` is used to add a cycle count timestamp to
each packet (see the *cycles* section of the *DPDK API Documentation* for
@@ -168,31 +115,10 @@ The calc_latency() Callback
The ``calc_latency()`` callback is added to the TX port and is applied to all
packets prior to transmission:
-.. code-block:: c
-
- static uint16_t
- calc_latency(uint16_t port __rte_unused, uint16_t qidx __rte_unused,
- struct rte_mbuf **pkts, uint16_t nb_pkts, void *_ __rte_unused)
- {
- uint64_t cycles = 0;
- uint64_t now = rte_rdtsc();
- unsigned i;
-
- for (i = 0; i < nb_pkts; i++)
- cycles += now - *tsc_field(pkts[i]);
-
- latency_numbers.total_cycles += cycles;
- latency_numbers.total_pkts += nb_pkts;
-
- if (latency_numbers.total_pkts > (100 * 1000 * 1000ULL)) {
- printf("Latency = %"PRIu64" cycles\n",
- latency_numbers.total_cycles / latency_numbers.total_pkts);
-
- latency_numbers.total_cycles = latency_numbers.total_pkts = 0;
- }
-
- return nb_pkts;
- }
+.. literalinclude:: ../../../examples/rxtx_callbacks/main.c
+ :language: c
+ :start-after: Callback is added to the TX port. 8<
+ :end-before: >8 End of callback addition.
The ``calc_latency()`` function accumulates the total number of packets and
the total number of cycles used. Once more than 100 million packets have been
@@ -137,120 +137,29 @@ which is used to distribute packets to nodes, which the number of flows
specified in the command line (1 million, by default).
-.. code-block:: c
-
- static void
- create_efd_table(void)
- {
- uint8_t socket_id = rte_socket_id();
-
- /* create table */
- efd_table = rte_efd_create("flow table", num_flows * 2, sizeof(uint32_t),
- 1 << socket_id, socket_id);
-
- if (efd_table == NULL)
- rte_exit(EXIT_FAILURE, "Problem creating the flow table\n");
- }
-
- static void
- populate_efd_table(void)
- {
- unsigned int i;
- int32_t ret;
- uint32_t ip_dst;
- uint8_t socket_id = rte_socket_id();
- uint64_t node_id;
-
- /* Add flows in table */
- for (i = 0; i < num_flows; i++) {
- node_id = i % num_nodes;
-
- ip_dst = rte_cpu_to_be_32(i);
- ret = rte_efd_update(efd_table, socket_id,
- (void *)&ip_dst, (efd_value_t)node_id);
- if (ret < 0)
- rte_exit(EXIT_FAILURE, "Unable to add entry %u in "
- "EFD table\n", i);
- }
-
- printf("EFD table: Adding 0x%x keys\n", num_flows);
- }
+.. literalinclude:: ../../../examples/server_node_efd/server/init.c
+ :language: c
+ :start-after: Create EFD table. 8<
+ :end-before: >8 End of creation EFD table.
After initialization, packets are received from the enabled ports, and the IPv4
address from the packets is used as a key to look up in the EFD table,
which indicates the node to which the packet has to be distributed.
-.. code-block:: c
-
- static void
- process_packets(uint32_t port_num __rte_unused, struct rte_mbuf *pkts[],
- uint16_t rx_count, unsigned int socket_id)
- {
- uint16_t i;
- uint8_t node;
- efd_value_t data[EFD_BURST_MAX];
- const void *key_ptrs[EFD_BURST_MAX];
-
- struct rte_ipv4_hdr *ipv4_hdr;
- uint32_t ipv4_dst_ip[EFD_BURST_MAX];
-
- for (i = 0; i < rx_count; i++) {
- /* Handle IPv4 header.*/
- ipv4_hdr = rte_pktmbuf_mtod_offset(pkts[i], struct rte_ipv4_hdr *,
- sizeof(struct rte_ether_hdr));
- ipv4_dst_ip[i] = ipv4_hdr->dst_addr;
- key_ptrs[i] = (void *)&ipv4_dst_ip[i];
- }
-
- rte_efd_lookup_bulk(efd_table, socket_id, rx_count,
- (const void **) key_ptrs, data);
- for (i = 0; i < rx_count; i++) {
- node = (uint8_t) ((uintptr_t)data[i]);
-
- if (node >= num_nodes) {
- /*
- * Node is out of range, which means that
- * flow has not been inserted
- */
- flow_dist_stats.drop++;
- rte_pktmbuf_free(pkts[i]);
- } else {
- flow_dist_stats.distributed++;
- enqueue_rx_packet(node, pkts[i]);
- }
- }
-
- for (i = 0; i < num_nodes; i++)
- flush_rx_queue(i);
- }
+.. literalinclude:: ../../../examples/server_node_efd/server/main.c
+ :language: c
+ :start-after: Processing packets. 8<
+ :end-before: >8 End of process_packets.
The burst of packets received is enqueued in temporary buffers (per node),
and then enqueued in the shared ring between the server and the node.
After this, a new burst of packets is received and this process is
repeated infinitely.
-.. code-block:: c
-
- static void
- flush_rx_queue(uint16_t node)
- {
- uint16_t j;
- struct node *cl;
-
- if (cl_rx_buf[node].count == 0)
- return;
-
- cl = &nodes[node];
- if (rte_ring_enqueue_bulk(cl->rx_q, (void **)cl_rx_buf[node].buffer,
- cl_rx_buf[node].count, NULL) != cl_rx_buf[node].count){
- for (j = 0; j < cl_rx_buf[node].count; j++)
- rte_pktmbuf_free(cl_rx_buf[node].buffer[j]);
- cl->stats.rx_drop += cl_rx_buf[node].count;
- } else
- cl->stats.rx += cl_rx_buf[node].count;
-
- cl_rx_buf[node].count = 0;
- }
+.. literalinclude:: ../../../examples/server_node_efd/server/main.c
+ :language: c
+ :start-after: Flush rx queue. 8<
+ :end-before: >8 End of sending a burst of traffic to a node.
The second process, the back-end node, receives the packets from the ring
shared with the server and sends them out, if they belong to the node.
@@ -258,83 +167,19 @@ ring with the server and send them out, if they belong to the node.
At initialization, it attaches to the server process memory, to have
access to the shared ring, parameters and statistics.
-.. code-block:: c
-
- rx_ring = rte_ring_lookup(get_rx_queue_name(node_id));
- if (rx_ring == NULL)
- rte_exit(EXIT_FAILURE, "Cannot get RX ring - "
- "is server process running?\n");
-
- mp = rte_mempool_lookup(PKTMBUF_POOL_NAME);
- if (mp == NULL)
- rte_exit(EXIT_FAILURE, "Cannot get mempool for mbufs\n");
-
- mz = rte_memzone_lookup(MZ_SHARED_INFO);
- if (mz == NULL)
- rte_exit(EXIT_FAILURE, "Cannot get port info structure\n");
- info = mz->addr;
- tx_stats = &(info->tx_stats[node_id]);
- filter_stats = &(info->filter_stats[node_id]);
+.. literalinclude:: ../../../examples/server_node_efd/node/node.c
+ :language: c
+ :start-after: Attaching to the server process memory. 8<
+ :end-before: >8 End of attaching to the server process memory.
+ :dedent: 1
Then, the hash table that contains the flows that will be handled
by the node is created and populated.
-.. code-block:: c
-
- static struct rte_hash *
- create_hash_table(const struct shared_info *info)
- {
- uint32_t num_flows_node = info->num_flows / info->num_nodes;
- char name[RTE_HASH_NAMESIZE];
- struct rte_hash *h;
-
- /* create table */
- struct rte_hash_parameters hash_params = {
- .entries = num_flows_node * 2, /* table load = 50% */
- .key_len = sizeof(uint32_t), /* Store IPv4 dest IP address */
- .socket_id = rte_socket_id(),
- .hash_func_init_val = 0,
- };
-
- snprintf(name, sizeof(name), "hash_table_%d", node_id);
- hash_params.name = name;
- h = rte_hash_create(&hash_params);
-
- if (h == NULL)
- rte_exit(EXIT_FAILURE,
- "Problem creating the hash table for node %d\n",
- node_id);
- return h;
- }
-
- static void
- populate_hash_table(const struct rte_hash *h, const struct shared_info *info)
- {
- unsigned int i;
- int32_t ret;
- uint32_t ip_dst;
- uint32_t num_flows_node = 0;
- uint64_t target_node;
-
- /* Add flows in table */
- for (i = 0; i < info->num_flows; i++) {
- target_node = i % info->num_nodes;
- if (target_node != node_id)
- continue;
-
- ip_dst = rte_cpu_to_be_32(i);
-
- ret = rte_hash_add_key(h, (void *) &ip_dst);
- if (ret < 0)
- rte_exit(EXIT_FAILURE, "Unable to add entry %u "
- "in hash table\n", i);
- else
- num_flows_node++;
-
- }
-
- printf("Hash table: Adding 0x%x keys\n", num_flows_node);
- }
+.. literalinclude:: ../../../examples/server_node_efd/node/node.c
+ :language: c
+ :start-after: Creation of hash table. 8<
+ :end-before: >8 End of creation of hash table.
After initialization, packets are dequeued from the shared ring
(from the server) and, like in the server process,
@@ -343,108 +188,15 @@ If there is a hit, packet is stored in a buffer, to be eventually transmitted
on one of the enabled ports. If the key is not present, the packet is dropped, since the
flow is not handled by the node.
-.. code-block:: c
-
- static inline void
- handle_packets(struct rte_hash *h, struct rte_mbuf **bufs, uint16_t num_packets)
- {
- struct rte_ipv4_hdr *ipv4_hdr;
- uint32_t ipv4_dst_ip[PKT_READ_SIZE];
- const void *key_ptrs[PKT_READ_SIZE];
- unsigned int i;
- int32_t positions[PKT_READ_SIZE] = {0};
-
- for (i = 0; i < num_packets; i++) {
- /* Handle IPv4 header.*/
- ipv4_hdr = rte_pktmbuf_mtod_offset(bufs[i], struct rte_ipv4_hdr *,
- sizeof(struct rte_ether_hdr));
- ipv4_dst_ip[i] = ipv4_hdr->dst_addr;
- key_ptrs[i] = &ipv4_dst_ip[i];
- }
- /* Check if packets belongs to any flows handled by this node */
- rte_hash_lookup_bulk(h, key_ptrs, num_packets, positions);
-
- for (i = 0; i < num_packets; i++) {
- if (likely(positions[i] >= 0)) {
- filter_stats->passed++;
- transmit_packet(bufs[i]);
- } else {
- filter_stats->drop++;
- /* Drop packet, as flow is not handled by this node */
- rte_pktmbuf_free(bufs[i]);
- }
- }
- }
+.. literalinclude:: ../../../examples/server_node_efd/node/node.c
+ :language: c
+ :start-after: Packets dequeued from the shared ring. 8<
+ :end-before: >8 End of packets dequeueing.
Finally, note that both processes update statistics, such as transmitted, received
and dropped packets, which are shown and refreshed by the server app.
-.. code-block:: c
-
- static void
- do_stats_display(void)
- {
- unsigned int i, j;
- const char clr[] = {27, '[', '2', 'J', '\0'};
- const char topLeft[] = {27, '[', '1', ';', '1', 'H', '\0'};
- uint64_t port_tx[RTE_MAX_ETHPORTS], port_tx_drop[RTE_MAX_ETHPORTS];
- uint64_t node_tx[MAX_NODES], node_tx_drop[MAX_NODES];
-
- /* to get TX stats, we need to do some summing calculations */
- memset(port_tx, 0, sizeof(port_tx));
- memset(port_tx_drop, 0, sizeof(port_tx_drop));
- memset(node_tx, 0, sizeof(node_tx));
- memset(node_tx_drop, 0, sizeof(node_tx_drop));
-
- for (i = 0; i < num_nodes; i++) {
- const struct tx_stats *tx = &info->tx_stats[i];
-
- for (j = 0; j < info->num_ports; j++) {
- const uint64_t tx_val = tx->tx[info->id[j]];
- const uint64_t drop_val = tx->tx_drop[info->id[j]];
-
- port_tx[j] += tx_val;
- port_tx_drop[j] += drop_val;
- node_tx[i] += tx_val;
- node_tx_drop[i] += drop_val;
- }
- }
-
- /* Clear screen and move to top left */
- printf("%s%s", clr, topLeft);
-
- printf("PORTS\n");
- printf("-----\n");
- for (i = 0; i < info->num_ports; i++)
- printf("Port %u: '%s'\t", (unsigned int)info->id[i],
- get_printable_mac_addr(info->id[i]));
- printf("\n\n");
- for (i = 0; i < info->num_ports; i++) {
- printf("Port %u - rx: %9"PRIu64"\t"
- "tx: %9"PRIu64"\n",
- (unsigned int)info->id[i], info->rx_stats.rx[i],
- port_tx[i]);
- }
-
- printf("\nSERVER\n");
- printf("-----\n");
- printf("distributed: %9"PRIu64", drop: %9"PRIu64"\n",
- flow_dist_stats.distributed, flow_dist_stats.drop);
-
- printf("\nNODES\n");
- printf("-------\n");
- for (i = 0; i < num_nodes; i++) {
- const unsigned long long rx = nodes[i].stats.rx;
- const unsigned long long rx_drop = nodes[i].stats.rx_drop;
- const struct filter_stats *filter = &info->filter_stats[i];
-
- printf("Node %2u - rx: %9llu, rx_drop: %9llu\n"
- " tx: %9"PRIu64", tx_drop: %9"PRIu64"\n"
- " filter_passed: %9"PRIu64", "
- "filter_drop: %9"PRIu64"\n",
- i, rx, rx_drop, node_tx[i], node_tx_drop[i],
- filter->passed, filter->drop);
- }
-
- printf("\n");
- }
+.. literalinclude:: ../../../examples/server_node_efd/server/main.c
+ :language: c
+ :start-after: Display recorded statistics. 8<
+ :end-before: >8 End of displaying the recorded statistics.
@@ -53,28 +53,11 @@ order to register services: ``rte_service_component.h``, in addition
to the ordinary service cores header ``rte_service.h`` which provides
the runtime functions to add, remove and remap service cores.
-.. code-block:: c
-
- struct rte_service_spec service = {
- .name = "service_name",
- };
- int ret = rte_service_component_register(services, &id);
- if (ret)
- return -1;
-
- /* set the service itself to be ready to run. In the case of
- * ethdev, eventdev etc PMDs, this will be set when the
- * appropriate configure or setup function is called.
- */
- rte_service_component_runstate_set(id, 1);
-
- /* Collect statistics for the service */
- rte_service_set_stats_enable(id, 1);
-
- /* The application sets the service to running state. Note that this
- * function enables the service to run - while the 'component' version
- * of this function (as above) marks the service itself as ready */
- ret = rte_service_runstate_set(id, 1);
+.. literalinclude:: ../../../examples/service_cores/main.c
+ :language: c
+ :start-after: Register a service as an application. 8<
+ :end-before: >8 End of registering a service as an application.
+ :dedent: 2
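+
+The service body itself is an ordinary callback that the framework invokes
+repeatedly from a service core. A minimal sketch, assuming a hypothetical
+``do_work_cb()`` callback and registration helper (illustrative names, not
+the code of this sample application):
+
+.. code-block:: c
+
+    #include <rte_service.h>
+    #include <rte_service_component.h>
+
+    /* Hypothetical service callback: one iteration of work per invocation. */
+    static int32_t
+    do_work_cb(void *userdata)
+    {
+        (void)userdata;
+        /* ... poll a queue, process a burst of work, etc. ... */
+        return 0;
+    }
+
+    static int
+    register_do_work_service(uint32_t *id)
+    {
+        struct rte_service_spec spec = {
+            .name = "do_work",
+            .callback = do_work_cb,
+        };
+
+        if (rte_service_component_register(&spec, id))
+            return -1;
+
+        /* component is ready to run; the application then enables it */
+        rte_service_component_runstate_set(*id, 1);
+        rte_service_runstate_set(*id, 1);
+        return 0;
+    }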
Controlling A Service Core
@@ -87,26 +70,11 @@ functions.
These are the functions to start a service core, and have it run a service:
-.. code-block:: c
-
- /* the lcore ID to use as a service core */
- uint32_t service_core_id = 7;
- ret = rte_service_lcore_add(service_core_id);
- if(ret)
- return -1;
-
- /* service cores are in "stopped" state when added, so start it */
- ret = rte_service_lcore_start(service_core_id);
- if(ret)
- return -1;
-
- /* map a service to the service core, causing it to run the service */
- uint32_t service_id; /* ID of a registered service */
- uint32_t enable = 1; /* 1 maps the service, 0 unmaps */
- ret = rte_service_map_lcore_set(service_id, service_core_id, enable);
- if(ret)
- return -1;
-
+.. literalinclude:: ../../../examples/service_cores/main.c
+ :language: c
+ :start-after: Register a service as an application. 8<
+ :end-before: >8 End of registering a service as an application.
+ :dedent: 2
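+
+For reference, the control sequence described above has the following shape
+(a minimal sketch; the service core ID is an arbitrary placeholder and the
+service ID is the one obtained at registration time):
+
+.. code-block:: c
+
+    uint32_t service_core_id = 7; /* placeholder lcore to use as service core */
+    uint32_t service_id = 0;      /* returned by rte_service_component_register() */
+
+    /* add the lcore to the set of service cores; it is added in "stopped" state */
+    if (rte_service_lcore_add(service_core_id))
+        return -1;
+
+    /* start the service core */
+    if (rte_service_lcore_start(service_core_id))
+        return -1;
+
+    /* map the service to the core (1 maps, 0 unmaps) so the core runs it */
+    if (rte_service_map_lcore_set(service_id, service_core_id, 1))
+        return -1;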
Removing A Service Core
~~~~~~~~~~~~~~~~~~~~~~~
@@ -51,27 +51,21 @@ The first task is to initialize the Environment Abstraction Layer (EAL). The
``argc`` and ``argv`` arguments are provided to the ``rte_eal_init()``
function. The value returned is the number of parsed arguments:
-.. code-block:: c
-
- int ret = rte_eal_init(argc, argv);
- if (ret < 0)
- rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
+.. literalinclude:: ../../../examples/skeleton/basicfwd.c
+ :language: c
+ :start-after: Initializion the Environment Abstraction Layer (EAL). 8<
+ :end-before: >8 End of initializion the Environment Abstraction Layer (EAL).
+ :dedent: 1
The ``main()`` function also allocates a mempool to hold the mbufs (Message Buffers)
used by the application:
-.. code-block:: c
-
- mbuf_pool = rte_mempool_create("MBUF_POOL",
- NUM_MBUFS * nb_ports,
- MBUF_SIZE,
- MBUF_CACHE_SIZE,
- sizeof(struct rte_pktmbuf_pool_private),
- rte_pktmbuf_pool_init, NULL,
- rte_pktmbuf_init, NULL,
- rte_socket_id(),
- 0);
+.. literalinclude:: ../../../examples/skeleton/basicfwd.c
+ :language: c
+ :start-after: Allocates mempool to hold the mbufs. 8<
+ :end-before: >8 End of allocating mempool to hold mbuf.
+ :dedent: 1
Mbufs are the packet buffer structure used by DPDK. They are explained in
detail in the "Mbuf Library" section of the *DPDK Programmer's Guide*.
@@ -79,24 +73,22 @@ detail in the "Mbuf Library" section of the *DPDK Programmer's Guide*.
The ``main()`` function also initializes all the ports using the user defined
``port_init()`` function which is explained in the next section:
-.. code-block:: c
-
- RTE_ETH_FOREACH_DEV(portid) {
- if (port_init(portid, mbuf_pool) != 0) {
- rte_exit(EXIT_FAILURE,
- "Cannot init port %" PRIu8 "\n", portid);
- }
- }
-
+.. literalinclude:: ../../../examples/skeleton/basicfwd.c
+ :language: c
+ :start-after: Initializing all ports. 8<
+ :end-before: >8 End of initializing all ports.
+ :dedent: 1
Once the initialization is complete, the application is ready to launch a
function on an lcore. In this example ``lcore_main()`` is called on a single
lcore.
-.. code-block:: c
-
- lcore_main();
+.. literalinclude:: ../../../examples/skeleton/basicfwd.c
+ :language: c
+ :start-after: Called on single lcore. 8<
+ :end-before: >8 End of called on single lcore.
+ :dedent: 1
The ``lcore_main()`` function is explained below.
@@ -108,78 +100,38 @@ The Port Initialization Function
The main functional part of the port initialization used in the Basic
Forwarding application is shown below:
-.. code-block:: c
-
- static inline int
- port_init(uint16_t port, struct rte_mempool *mbuf_pool)
- {
- struct rte_eth_conf port_conf = port_conf_default;
- const uint16_t rx_rings = 1, tx_rings = 1;
- struct rte_ether_addr addr;
- int retval;
- uint16_t q;
-
- if (!rte_eth_dev_is_valid_port(port))
- return -1;
-
- /* Configure the Ethernet device. */
- retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
- if (retval != 0)
- return retval;
-
- /* Allocate and set up 1 RX queue per Ethernet port. */
- for (q = 0; q < rx_rings; q++) {
- retval = rte_eth_rx_queue_setup(port, q, RX_RING_SIZE,
- rte_eth_dev_socket_id(port), NULL, mbuf_pool);
- if (retval < 0)
- return retval;
- }
-
- /* Allocate and set up 1 TX queue per Ethernet port. */
- for (q = 0; q < tx_rings; q++) {
- retval = rte_eth_tx_queue_setup(port, q, TX_RING_SIZE,
- rte_eth_dev_socket_id(port), NULL);
- if (retval < 0)
- return retval;
- }
-
- /* Start the Ethernet port. */
- retval = rte_eth_dev_start(port);
- if (retval < 0)
- return retval;
-
- /* Enable RX in promiscuous mode for the Ethernet device. */
- retval = rte_eth_promiscuous_enable(port);
- if (retval != 0)
- return retval;
-
- return 0;
- }
+.. literalinclude:: ../../../examples/skeleton/basicfwd.c
+ :language: c
+ :start-after: Main functional part of port initialization. 8<
+ :end-before: >8 End of main functional part of port initialization.
The Ethernet ports are configured with default settings using the
``rte_eth_dev_configure()`` function and the ``port_conf_default`` struct:
-.. code-block:: c
-
- static const struct rte_eth_conf port_conf_default = {
- .rxmode = { .max_rx_pkt_len = RTE_ETHER_MAX_LEN }
- };
+.. literalinclude:: ../../../examples/skeleton/basicfwd.c
+ :language: c
+ :start-after: Configuration of ethernet ports. 8<
+ :end-before: >8 End of configuration of ethernet ports.
For this example the ports are set up with 1 RX and 1 TX queue using the
``rte_eth_rx_queue_setup()`` and ``rte_eth_tx_queue_setup()`` functions.
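+
+In outline, each setup call takes the port, the queue index, the descriptor
+ring size and the NUMA socket, plus (for RX queues) the mempool to draw mbufs
+from. A minimal sketch of these calls as used inside ``port_init()``:
+
+.. code-block:: c
+
+    uint16_t q;
+    int retval;
+
+    /* one RX queue per port, fed from the mempool allocated in main() */
+    for (q = 0; q < rx_rings; q++) {
+        retval = rte_eth_rx_queue_setup(port, q, RX_RING_SIZE,
+                rte_eth_dev_socket_id(port), NULL, mbuf_pool);
+        if (retval < 0)
+            return retval;
+    }
+
+    /* one TX queue per port; no mempool is needed on the TX side */
+    for (q = 0; q < tx_rings; q++) {
+        retval = rte_eth_tx_queue_setup(port, q, TX_RING_SIZE,
+                rte_eth_dev_socket_id(port), NULL);
+        if (retval < 0)
+            return retval;
+    }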
The Ethernet port is then started:
-.. code-block:: c
-
- retval = rte_eth_dev_start(port);
+.. literalinclude:: ../../../examples/skeleton/basicfwd.c
+ :language: c
+ :start-after: Starting Ethernet port. 8<
+ :end-before: >8 End of starting of ethernet port.
+ :dedent: 1
Finally the RX port is set in promiscuous mode:
-.. code-block:: c
-
- retval = rte_eth_promiscuous_enable(port);
+.. literalinclude:: ../../../examples/skeleton/basicfwd.c
+ :language: c
+ :start-after: Enable RX in promiscuous mode for the Ethernet device.
+ :end-before: End of setting RX port in promiscuous mode.
+ :dedent: 1
The Lcores Main
@@ -189,86 +141,18 @@ As we saw above the ``main()`` function calls an application function on the
available lcores. For the Basic Forwarding application the lcore function
looks like the following:
-.. code-block:: c
-
- static __rte_noreturn void
- lcore_main(void)
- {
- uint16_t port;
-
- /*
- * Check that the port is on the same NUMA node as the polling thread
- * for best performance.
- */
- RTE_ETH_FOREACH_DEV(port)
- if (rte_eth_dev_socket_id(port) > 0 &&
- rte_eth_dev_socket_id(port) !=
- (int)rte_socket_id())
- printf("WARNING, port %u is on remote NUMA node to "
- "polling thread.\n\tPerformance will "
- "not be optimal.\n", port);
-
- printf("\nCore %u forwarding packets. [Ctrl+C to quit]\n",
- rte_lcore_id());
-
- /* Run until the application is quit or killed. */
- for (;;) {
- /*
- * Receive packets on a port and forward them on the paired
- * port. The mapping is 0 -> 1, 1 -> 0, 2 -> 3, 3 -> 2, etc.
- */
- RTE_ETH_FOREACH_DEV(port) {
-
- /* Get burst of RX packets, from first port of pair. */
- struct rte_mbuf *bufs[BURST_SIZE];
- const uint16_t nb_rx = rte_eth_rx_burst(port, 0,
- bufs, BURST_SIZE);
-
- if (unlikely(nb_rx == 0))
- continue;
-
- /* Send burst of TX packets, to second port of pair. */
- const uint16_t nb_tx = rte_eth_tx_burst(port ^ 1, 0,
- bufs, nb_rx);
-
- /* Free any unsent packets. */
- if (unlikely(nb_tx < nb_rx)) {
- uint16_t buf;
- for (buf = nb_tx; buf < nb_rx; buf++)
- rte_pktmbuf_free(bufs[buf]);
- }
- }
- }
- }
-
+.. literalinclude:: ../../../examples/skeleton/basicfwd.c
+ :language: c
+ :start-after: Basic forwarding application lcore. 8<
+ :end-before: >8 End Basic forwarding application lcore.
The main work of the application is done within the loop:
-.. code-block:: c
-
- for (;;) {
- RTE_ETH_FOREACH_DEV(port) {
-
- /* Get burst of RX packets, from first port of pair. */
- struct rte_mbuf *bufs[BURST_SIZE];
- const uint16_t nb_rx = rte_eth_rx_burst(port, 0,
- bufs, BURST_SIZE);
-
- if (unlikely(nb_rx == 0))
- continue;
-
- /* Send burst of TX packets, to second port of pair. */
- const uint16_t nb_tx = rte_eth_tx_burst(port ^ 1, 0,
- bufs, nb_rx);
-
- /* Free any unsent packets. */
- if (unlikely(nb_tx < nb_rx)) {
- uint16_t buf;
- for (buf = nb_tx; buf < nb_rx; buf++)
- rte_pktmbuf_free(bufs[buf]);
- }
- }
- }
+.. literalinclude:: ../../../examples/skeleton/basicfwd.c
+ :language: c
+ :start-after: Main work of application loop. 8<
+ :end-before: >8 End of loop.
+ :dedent: 1
Packets are received in bursts on the RX ports and transmitted in bursts on
the TX ports. The ports are grouped in pairs with a simple mapping scheme
@@ -36,55 +36,29 @@ Initialization and Main Loop
In addition to EAL initialization, the timer subsystem must be initialized, by calling the rte_timer_subsystem_init() function.
-.. code-block:: c
-
- /* init EAL */
-
- ret = rte_eal_init(argc, argv);
- if (ret < 0)
- rte_panic("Cannot init EAL\n");
-
- /* init RTE timer library */
-
- rte_timer_subsystem_init();
+.. literalinclude:: ../../../examples/timer/main.c
+ :language: c
+ :start-after: Init EAL. 8<
+ :end-before: >8 End of init EAL.
+ :dedent: 1
After timer creation (see the next paragraph), the main loop is
executed on each worker lcore using the well-known
rte_eal_remote_launch() and also on the main lcore.
-.. code-block:: c
-
- /* call lcore_mainloop() on every worker lcore */
- RTE_LCORE_FOREACH_WORKER(lcore_id) {
- rte_eal_remote_launch(lcore_mainloop, NULL, lcore_id);
- }
-
- /* call it on main lcore too */
-
- (void) lcore_mainloop(NULL);
+.. literalinclude:: ../../../examples/timer/main.c
+ :language: c
+ :start-after: Call lcore_mainloop() on every worker lcore. 8<
+ :end-before: >8 End of call lcore_mainloop() on every worker lcore.
+ :dedent: 1
The main loop is very simple in this example:
-.. code-block:: c
-
- while (1) {
- /*
- * Call the timer handler on each core: as we don't
- * need a very precise timer, so only call
- * rte_timer_manage() every ~10ms (at 2 GHz). In a real
- * application, this will enhance performances as
- * reading the HPET timer is not efficient.
- */
-
- cur_tsc = rte_rdtsc();
-
- diff_tsc = cur_tsc - prev_tsc;
-
- if (diff_tsc > TIMER_RESOLUTION_CYCLES) {
- rte_timer_manage();
- prev_tsc = cur_tsc;
- }
- }
+.. literalinclude:: ../../../examples/timer/main.c
+ :language: c
+ :start-after: Main loop. 8<
+ :end-before: >8 End of main loop.
+ :dedent: 1
As explained in the comment, it is better to use the TSC register (as it is a per-lcore register) to check whether the
rte_timer_manage() function must be called.
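+
+In outline, the loop follows the pattern below (a minimal sketch, with
+``TIMER_RESOLUTION_CYCLES`` being the application-defined number of cycles
+between two calls):
+
+.. code-block:: c
+
+    uint64_t prev_tsc = 0, cur_tsc, diff_tsc;
+
+    while (1) {
+        /* read the per-lcore TSC and call rte_timer_manage() only
+         * when enough cycles have elapsed since the previous call */
+        cur_tsc = rte_rdtsc();
+        diff_tsc = cur_tsc - prev_tsc;
+        if (diff_tsc > TIMER_RESOLUTION_CYCLES) {
+            rte_timer_manage();
+            prev_tsc = cur_tsc;
+        }
+    }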
@@ -96,12 +70,11 @@ Managing Timers
In the main() function, the two timers are initialized.
This call to rte_timer_init() is necessary before doing any other operation on the timer structure.
-.. code-block:: c
-
- /* init timer structures */
-
- rte_timer_init(&timer0);
- rte_timer_init(&timer1);
+.. literalinclude:: ../../../examples/timer/main.c
+ :language: c
+ :start-after: Init timer structures. 8<
+ :end-before: >8 End of init timer structures.
+ :dedent: 1
Then, the two timers are configured:
@@ -113,64 +86,24 @@ Then, the two timers are configured:
The SINGLE flag means that the timer expires only once and must be reloaded manually if required.
The callback function is timer1_cb().
-.. code-block:: c
-
- /* load timer0, every second, on main lcore, reloaded automatically */
-
- hz = rte_get_hpet_hz();
-
- lcore_id = rte_lcore_id();
-
- rte_timer_reset(&timer0, hz, PERIODICAL, lcore_id, timer0_cb, NULL);
-
- /* load timer1, every second/3, on next lcore, reloaded manually */
-
- lcore_id = rte_get_next_lcore(lcore_id, 0, 1);
-
- rte_timer_reset(&timer1, hz/3, SINGLE, lcore_id, timer1_cb, NULL);
+.. literalinclude:: ../../../examples/timer/main.c
+ :language: c
+ :start-after: Load timer0, every second, on main lcore, reloaded automatically. 8<
+ :end-before: >8 End of two timers configured.
+ :dedent: 1
The callback for the first timer (timer0) only displays a message until a global counter reaches 20 (after 20 seconds).
In this case, the timer is stopped using the rte_timer_stop() function.
-.. code-block:: c
-
- /* timer0 callback */
-
- static void
- timer0_cb(__rte_unused struct rte_timer *tim, __rte_unused void *arg)
- {
- static unsigned counter = 0;
-
- unsigned lcore_id = rte_lcore_id();
-
-        printf("%s() on lcore %u\n", __FUNCTION__, lcore_id);
-
- /* this timer is automatically reloaded until we decide to stop it, when counter reaches 20. */
-
- if ((counter ++) == 20)
- rte_timer_stop(tim);
- }
+.. literalinclude:: ../../../examples/timer/main.c
+ :language: c
+ :start-after: timer0 callback. 8<
+ :end-before: >8 End of timer0 callback.
The callback for the second timer (timer1) displays a message and reloads the timer on the next lcore, using the
rte_timer_reset() function:
-.. code-block:: c
-
- /* timer1 callback */
-
- static void
- timer1_cb(__rte_unused struct rte_timer *tim, __rte_unused void *arg)
- {
- unsigned lcore_id = rte_lcore_id();
- uint64_t hz;
-
-        printf("%s() on lcore %u\n", __FUNCTION__, lcore_id);
-
- /* reload it on another lcore */
-
- hz = rte_get_hpet_hz();
-
- lcore_id = rte_get_next_lcore(lcore_id, 0, 1);
-
- rte_timer_reset(&timer1, hz/3, SINGLE, lcore_id, timer1_cb, NULL);
- }
+.. literalinclude:: ../../../examples/timer/main.c
+ :language: c
+ :start-after: timer1 callback. 8<
+ :end-before: >8 End of timer1 callback.
@@ -95,50 +95,10 @@ rte_eth_conf structure passed to the rte_eth_dev_configure() API.
Initially in the application,
a default structure is provided for VMDQ and DCB configuration to be filled in later by the application.
-.. code-block:: c
-
- /* empty vmdq+dcb configuration structure. Filled in programmatically */
- static const struct rte_eth_conf vmdq_dcb_conf_default = {
- .rxmode = {
- .mq_mode = ETH_MQ_RX_VMDQ_DCB,
- .split_hdr_size = 0,
- },
- .txmode = {
- .mq_mode = ETH_MQ_TX_VMDQ_DCB,
- },
- /*
- * should be overridden separately in code with
- * appropriate values
- */
- .rx_adv_conf = {
- .vmdq_dcb_conf = {
- .nb_queue_pools = ETH_32_POOLS,
- .enable_default_pool = 0,
- .default_pool = 0,
- .nb_pool_maps = 0,
- .pool_map = {{0, 0},},
- .dcb_tc = {0},
- },
- .dcb_rx_conf = {
- .nb_tcs = ETH_4_TCS,
- /** Traffic class each UP mapped to. */
- .dcb_tc = {0},
- },
- .vmdq_rx_conf = {
- .nb_queue_pools = ETH_32_POOLS,
- .enable_default_pool = 0,
- .default_pool = 0,
- .nb_pool_maps = 0,
- .pool_map = {{0, 0},},
- },
- },
- .tx_adv_conf = {
- .vmdq_dcb_tx_conf = {
- .nb_queue_pools = ETH_32_POOLS,
- .dcb_tc = {0},
- },
- },
- };
+.. literalinclude:: ../../../examples/vmdq_dcb/main.c
+ :language: c
+ :start-after: Empty vmdq+dcb configuration structure. Filled in programmatically. 8<
+ :end-before: >8 End of empty vmdq+dcb configuration structure.
The get_eth_conf() function fills in an rte_eth_conf structure with the appropriate values,
based on the global vlan_tags array,
@@ -155,92 +115,16 @@ For destination MAC, each VMDQ pool will be assigned with a MAC address. In this
is assigned a MAC address of the form 52:54:00:12:<port_id>:<pool_id>, that is,
the MAC of VMDQ pool 2 on port 1 is 52:54:00:12:01:02.
-.. code-block:: c
-
- const uint16_t vlan_tags[] = {
- 0, 1, 2, 3, 4, 5, 6, 7,
- 8, 9, 10, 11, 12, 13, 14, 15,
- 16, 17, 18, 19, 20, 21, 22, 23,
- 24, 25, 26, 27, 28, 29, 30, 31
- };
-
- /* pool mac addr template, pool mac addr is like: 52 54 00 12 port# pool# */
- static struct rte_ether_addr pool_addr_template = {
- .addr_bytes = {0x52, 0x54, 0x00, 0x12, 0x00, 0x00}
- };
-
- /* Builds up the correct configuration for vmdq+dcb based on the vlan tags array
- * given above, and the number of traffic classes available for use. */
- static inline int
- get_eth_conf(struct rte_eth_conf *eth_conf)
- {
- struct rte_eth_vmdq_dcb_conf conf;
- struct rte_eth_vmdq_rx_conf vmdq_conf;
- struct rte_eth_dcb_rx_conf dcb_conf;
- struct rte_eth_vmdq_dcb_tx_conf tx_conf;
- uint8_t i;
-
- conf.nb_queue_pools = (enum rte_eth_nb_pools)num_pools;
- vmdq_conf.nb_queue_pools = (enum rte_eth_nb_pools)num_pools;
- tx_conf.nb_queue_pools = (enum rte_eth_nb_pools)num_pools;
- conf.nb_pool_maps = num_pools;
- vmdq_conf.nb_pool_maps = num_pools;
- conf.enable_default_pool = 0;
- vmdq_conf.enable_default_pool = 0;
- conf.default_pool = 0; /* set explicit value, even if not used */
- vmdq_conf.default_pool = 0;
-
- for (i = 0; i < conf.nb_pool_maps; i++) {
- conf.pool_map[i].vlan_id = vlan_tags[i];
- vmdq_conf.pool_map[i].vlan_id = vlan_tags[i];
- conf.pool_map[i].pools = 1UL << i ;
- vmdq_conf.pool_map[i].pools = 1UL << i;
- }
- for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++){
- conf.dcb_tc[i] = i % num_tcs;
- dcb_conf.dcb_tc[i] = i % num_tcs;
- tx_conf.dcb_tc[i] = i % num_tcs;
- }
- dcb_conf.nb_tcs = (enum rte_eth_nb_tcs)num_tcs;
- (void)(rte_memcpy(eth_conf, &vmdq_dcb_conf_default, sizeof(*eth_conf)));
-        (void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &conf,
-                sizeof(conf)));
-        (void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &dcb_conf,
-                sizeof(dcb_conf)));
-        (void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &vmdq_conf,
-                sizeof(vmdq_conf)));
-        (void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &tx_conf,
- sizeof(tx_conf)));
- if (rss_enable) {
- eth_conf->rxmode.mq_mode= ETH_MQ_RX_VMDQ_DCB_RSS;
- eth_conf->rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP |
- ETH_RSS_UDP |
- ETH_RSS_TCP |
- ETH_RSS_SCTP;
- }
- return 0;
- }
-
- ......
-
- /* Set mac for each pool.*/
- for (q = 0; q < num_pools; q++) {
- struct rte_ether_addr mac;
- mac = pool_addr_template;
- mac.addr_bytes[4] = port;
- mac.addr_bytes[5] = q;
- printf("Port %u vmdq pool %u set mac %02x:%02x:%02x:%02x:%02x:%02x\n",
- port, q,
- mac.addr_bytes[0], mac.addr_bytes[1],
- mac.addr_bytes[2], mac.addr_bytes[3],
- mac.addr_bytes[4], mac.addr_bytes[5]);
- retval = rte_eth_dev_mac_addr_add(port, &mac,
- q + vmdq_pool_base);
- if (retval) {
- printf("mac addr add failed at pool %d\n", q);
- return retval;
- }
- }
+.. literalinclude:: ../../../examples/vmdq_dcb/main.c
+ :language: c
+ :start-after: Dividing up the possible user priority values. 8<
+ :end-before: >8 End of dividing up the possible user priority values.
+
+.. literalinclude:: ../../../examples/vmdq_dcb/main.c
+ :language: c
+ :start-after: Set mac for each pool. 8<
+ :end-before: >8 End of set mac for each pool.
+ :dedent: 1
Once the network port has been initialized using the correct VMDQ and DCB values,
the initialization of the port's RX and TX hardware rings is performed similarly to that
@@ -79,32 +79,10 @@ rte_eth_conf structure passed to the rte_eth_dev_configure() API.
Initially in the application,
a default structure is provided for VMDq configuration to be filled in later by the application.
-.. code-block:: c
-
- /* empty vmdq configuration structure. Filled in programmatically */
- static const struct rte_eth_conf vmdq_conf_default = {
- .rxmode = {
- .mq_mode = ETH_MQ_RX_VMDQ_ONLY,
- .split_hdr_size = 0,
- },
-
- .txmode = {
- .mq_mode = ETH_MQ_TX_NONE,
- },
- .rx_adv_conf = {
- /*
- * should be overridden separately in code with
- * appropriate values
- */
- .vmdq_rx_conf = {
- .nb_queue_pools = ETH_8_POOLS,
- .enable_default_pool = 0,
- .default_pool = 0,
- .nb_pool_maps = 0,
- .pool_map = {{0, 0},},
- },
- },
- };
+.. literalinclude:: ../../../examples/vmdq/main.c
+ :language: c
+ :start-after: Default structure for VMDq. 8<
+ :end-before: >8 End of Empty vdmq configuration structure.
The get_eth_conf() function fills in an rte_eth_conf structure with the appropriate values,
based on the global vlan_tags array.
@@ -113,75 +91,20 @@ For destination MAC, each VMDq pool will be assigned with a MAC address. In this
is assigned a MAC address of the form 52:54:00:12:<port_id>:<pool_id>, that is,
the MAC of VMDq pool 2 on port 1 is 52:54:00:12:01:02.
-.. code-block:: c
-
- const uint16_t vlan_tags[] = {
- 0, 1, 2, 3, 4, 5, 6, 7,
- 8, 9, 10, 11, 12, 13, 14, 15,
- 16, 17, 18, 19, 20, 21, 22, 23,
- 24, 25, 26, 27, 28, 29, 30, 31,
- 32, 33, 34, 35, 36, 37, 38, 39,
- 40, 41, 42, 43, 44, 45, 46, 47,
- 48, 49, 50, 51, 52, 53, 54, 55,
- 56, 57, 58, 59, 60, 61, 62, 63,
- };
-
- /* pool mac addr template, pool mac addr is like: 52 54 00 12 port# pool# */
- static struct rte_ether_addr pool_addr_template = {
- .addr_bytes = {0x52, 0x54, 0x00, 0x12, 0x00, 0x00}
- };
-
- /*
- * Builds up the correct configuration for vmdq based on the vlan tags array
- * given above, and determine the queue number and pool map number according to
- * valid pool number
- */
- static inline int
- get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_pools)
- {
- struct rte_eth_vmdq_rx_conf conf;
- unsigned i;
-
- conf.nb_queue_pools = (enum rte_eth_nb_pools)num_pools;
- conf.nb_pool_maps = num_pools;
- conf.enable_default_pool = 0;
- conf.default_pool = 0; /* set explicit value, even if not used */
-
- for (i = 0; i < conf.nb_pool_maps; i++) {
- conf.pool_map[i].vlan_id = vlan_tags[i];
- conf.pool_map[i].pools = (1UL << (i % num_pools));
- }
-
- (void)(rte_memcpy(eth_conf, &vmdq_conf_default, sizeof(*eth_conf)));
-        (void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &conf,
- sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf)));
- return 0;
- }
-
- ......
-
- /*
- * Set mac for each pool.
- * There is no default mac for the pools in i40.
- * Removes this after i40e fixes this issue.
- */
- for (q = 0; q < num_pools; q++) {
- struct rte_ether_addr mac;
- mac = pool_addr_template;
- mac.addr_bytes[4] = port;
- mac.addr_bytes[5] = q;
- printf("Port %u vmdq pool %u set mac %02x:%02x:%02x:%02x:%02x:%02x\n",
- port, q,
- mac.addr_bytes[0], mac.addr_bytes[1],
- mac.addr_bytes[2], mac.addr_bytes[3],
- mac.addr_bytes[4], mac.addr_bytes[5]);
- retval = rte_eth_dev_mac_addr_add(port, &mac,
- q + vmdq_pool_base);
- if (retval) {
- printf("mac addr add failed at pool %d\n", q);
- return retval;
- }
- }
+.. literalinclude:: ../../../examples/vmdq/main.c
+ :language: c
+ :start-after: vlan_tags 8<
+ :end-before: >8 End of vlan_tags.
+
+.. literalinclude:: ../../../examples/vmdq/main.c
+ :language: c
+ :start-after: Pool mac address template. 8<
+ :end-before: >8 End of mac addr template.
+
+.. literalinclude:: ../../../examples/vmdq/main.c
+ :language: c
+ :start-after: Building correct configruration for vdmq. 8<
+ :end-before: >8 End of get_eth_conf.
Once the network port has been initialized using the correct VMDq values,
the initialization of the port's RX and TX hardware rings is performed similarly to that
@@ -57,6 +57,7 @@ struct object_list global_obj_list;
/**********************************************************/
+/* Show or delete tokens. 8< */
struct cmd_obj_del_show_result {
cmdline_fixed_string_t action;
struct object *obj;
@@ -105,6 +106,7 @@ cmdline_parse_inst_t cmd_obj_del_show = {
NULL,
},
};
+/* >8 End of show or delete tokens. */
/**********************************************************/
@@ -211,9 +213,11 @@ cmdline_parse_inst_t cmd_help = {
/**********************************************************/
/****** CONTEXT (list of instruction) */
+/* Cmdline context list of commands in NULL-terminated table. 8< */
cmdline_parse_ctx_t main_ctx[] = {
(cmdline_parse_inst_t *)&cmd_obj_del_show,
(cmdline_parse_inst_t *)&cmd_obj_add,
(cmdline_parse_inst_t *)&cmd_help,
NULL,
};
+/* >8 End of context list. */
@@ -21,6 +21,7 @@
#include "commands.h"
+/* Initialization of the Environment Abstraction Layer (EAL). 8< */
int main(int argc, char **argv)
{
int ret;
@@ -29,12 +30,15 @@ int main(int argc, char **argv)
ret = rte_eal_init(argc, argv);
if (ret < 0)
rte_panic("Cannot init EAL\n");
+ /* >8 End of initialization of Environment Abstraction Layer (EAL). */
+ /* Creating a new command line object. 8< */
cl = cmdline_stdin_new(main_ctx, "example> ");
if (cl == NULL)
rte_panic("Cannot create cmdline instance\n");
cmdline_interact(cl);
cmdline_stdin_exit(cl);
+ /* >8 End of creating a new command line object. */
/* clean up the EAL */
rte_eal_cleanup();
@@ -59,12 +59,15 @@ static struct{
} parm_config;
const char cb_port_delim[] = ":";
+/* Ethernet ports configured with default settings using struct. 8< */
static const struct rte_eth_conf port_conf_default = {
.rxmode = {
.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
},
};
+/* >8 End of configuration of Ethernet ports. */
+/* Creation of flow classifier object. 8< */
struct flow_classifier {
struct rte_flow_classifier *cls;
};
@@ -72,9 +75,11 @@ struct flow_classifier {
struct flow_classifier_acl {
struct flow_classifier cls;
} __rte_cache_aligned;
+/* >8 End of creation of flow classifier object. */
-/* ACL field definitions for IPv4 5 tuple rule */
+/* Creation of ACL table during initialization of application. 8< */
+/* ACL field definitions for IPv4 5 tuple rule */
enum {
PROTO_FIELD_IPV4,
SRC_FIELD_IPV4,
@@ -146,14 +151,16 @@ static struct rte_acl_field_def ipv4_defs[NUM_FIELDS_IPV4] = {
offsetof(struct rte_tcp_hdr, dst_port),
},
};
+/* >8 End of creation of ACL table. */
-/* flow classify data */
+/* Flow classify data. 8< */
static int num_classify_rules;
static struct rte_flow_classify_rule *rules[MAX_NUM_CLASSIFY];
static struct rte_flow_classify_ipv4_5tuple_stats ntuple_stats;
static struct rte_flow_classify_stats classify_stats = {
.stats = (void **)&ntuple_stats
};
+/* >8 End of flow classify data. */
/* parameters for rte_flow_classify_validate and
* rte_flow_classify_table_entry_add functions
@@ -188,6 +195,8 @@ static struct rte_flow_attr attr;
* Initializes a given port using global settings and with the RX buffers
* coming from the mbuf_pool passed as a parameter.
*/
+
+/* Initializing port using global settings. 8< */
static inline int
port_init(uint8_t port, struct rte_mempool *mbuf_pool)
{
@@ -236,8 +245,9 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
return retval;
}
- /* Start the Ethernet port. */
+ /* Start the Ethernet port. 8< */
retval = rte_eth_dev_start(port);
+ /* >8 End of starting the Ethernet port. */
if (retval < 0)
return retval;
@@ -260,11 +270,14 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
return 0;
}
+/* >8 End of initializing a given port. */
/*
* The lcore main. This is the main thread that does the work, reading from
* an input port classifying the packets and writing to an output port.
*/
+
+/* Classifying the packets. 8< */
static __rte_noreturn void
lcore_main(struct flow_classifier *cls_app)
{
@@ -295,7 +308,7 @@ lcore_main(struct flow_classifier *cls_app)
printf("\nCore %u forwarding packets. ", rte_lcore_id());
printf("[Ctrl+C to quit]\n");
- /* Run until the application is quit or killed. */
+ /* Run until the application is quit or killed. 8< */
for (;;) {
/*
* Receive packets on a port, classify them and forward them
@@ -345,7 +358,9 @@ lcore_main(struct flow_classifier *cls_app)
}
}
}
+ /* >8 End of main loop. */
}
+/* >8 End of lcore main. */
/*
* Parse IPv4 5 tuple rules file, ipv4_rules_file.txt.
@@ -655,6 +670,7 @@ add_classify_rule(struct rte_eth_ntuple_filter *ntuple_filter,
return 0;
}
+/* Reads file and calls the add_classify_rule function. 8< */
static int
add_rules(const char *rule_path, struct flow_classifier *cls_app)
{
@@ -702,6 +718,7 @@ add_rules(const char *rule_path, struct flow_classifier *cls_app)
fclose(fh);
return 0;
}
+/* >8 End of add_rules. */
/* display usage */
static void
@@ -771,43 +788,47 @@ main(int argc, char *argv[])
struct rte_flow_classifier_params cls_params;
uint32_t size;
- /* Initialize the Environment Abstraction Layer (EAL). */
+ /* Initialize the Environment Abstraction Layer (EAL). 8< */
ret = rte_eal_init(argc, argv);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
+ /* >8 End of initialization of EAL. */
argc -= ret;
argv += ret;
- /* parse application arguments (after the EAL ones) */
+ /* Parse application arguments (after the EAL ones). 8< */
ret = parse_args(argc, argv);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Invalid flow_classify parameters\n");
+ /* >8 End of parse application arguments. */
/* Check that there is an even number of ports to send/receive on. */
nb_ports = rte_eth_dev_count_avail();
if (nb_ports < 2 || (nb_ports & 1))
rte_exit(EXIT_FAILURE, "Error: number of ports must be even\n");
- /* Creates a new mempool in memory to hold the mbufs. */
+ /* Creates a new mempool in memory to hold the mbufs. 8< */
mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", NUM_MBUFS * nb_ports,
MBUF_CACHE_SIZE, 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
+ /* >8 End of creation of new mempool in memory. */
if (mbuf_pool == NULL)
rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
- /* Initialize all ports. */
+ /* Initialize all ports. 8< */
RTE_ETH_FOREACH_DEV(portid)
if (port_init(portid, mbuf_pool) != 0)
rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu8 "\n",
portid);
+ /* >8 End of initialization of all ports. */
if (rte_lcore_count() > 1)
printf("\nWARNING: Too many lcores enabled. Only 1 used.\n");
socket_id = rte_eth_dev_socket_id(0);
- /* Memory allocation */
+ /* Memory allocation. 8< */
size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct flow_classifier_acl));
cls_app = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
if (cls_app == NULL)
@@ -839,16 +860,20 @@ main(int argc, char *argv[])
rte_free(cls_app);
rte_exit(EXIT_FAILURE, "Failed to create classifier table\n");
}
+ /* >8 End of initialization of table create params. */
/* read file of IPv4 5 tuple rules and initialize parameters
* for rte_flow_classify_validate and rte_flow_classify_table_entry_add
* API's.
*/
+
+ /* Read file of IPv4 tuple rules. 8< */
if (add_rules(parm_config.rule_ipv4_name, cls_app)) {
rte_flow_classifier_free(cls_app->cls);
rte_free(cls_app);
rte_exit(EXIT_FAILURE, "Failed to add rules\n");
}
+ /* >8 End of reading file of IPv4 5 tuple rules. */
/* Call lcore_main on the main core only. */
lcore_main(cls_app);
@@ -34,12 +34,15 @@ generate_ipv4_flow(uint16_t port_id, uint16_t rx_q,
* @return
* A flow if the rule could be created else return NULL.
*/
+
+/* Function responsible for creating the flow rule. 8< */
struct rte_flow *
generate_ipv4_flow(uint16_t port_id, uint16_t rx_q,
uint32_t src_ip, uint32_t src_mask,
uint32_t dest_ip, uint32_t dest_mask,
struct rte_flow_error *error)
{
+ /* Declaring structs being used. 8< */
struct rte_flow_attr attr;
struct rte_flow_item pattern[MAX_PATTERN_NUM];
struct rte_flow_action action[MAX_ACTION_NUM];
@@ -47,17 +50,16 @@ generate_ipv4_flow(uint16_t port_id, uint16_t rx_q,
struct rte_flow_action_queue queue = { .index = rx_q };
struct rte_flow_item_ipv4 ip_spec;
struct rte_flow_item_ipv4 ip_mask;
+ /* >8 End of declaring structs being used. */
int res;
memset(pattern, 0, sizeof(pattern));
memset(action, 0, sizeof(action));
- /*
- * set the rule attribute.
- * in this case only ingress packets will be checked.
- */
+ /* Set the rule attribute, only ingress packets will be checked. 8< */
memset(&attr, 0, sizeof(struct rte_flow_attr));
attr.ingress = 1;
+ /* >8 End of setting the rule attribute. */
/*
* create the action sequence.
@@ -72,13 +74,18 @@ generate_ipv4_flow(uint16_t port_id, uint16_t rx_q,
* since in this example we just want to get the
* ipv4 we set this level to allow all.
*/
+
+ /* IPv4 we set this level to allow all. 8< */
pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;
+ /* >8 End of setting the first level of the pattern. */
/*
* setting the second level of the pattern (IP).
* in this example this is the level we care about
* so we set it according to the parameters.
*/
+
+ /* Setting the second level of the pattern. 8< */
memset(&ip_spec, 0, sizeof(struct rte_flow_item_ipv4));
memset(&ip_mask, 0, sizeof(struct rte_flow_item_ipv4));
ip_spec.hdr.dst_addr = htonl(dest_ip);
@@ -88,13 +95,18 @@ generate_ipv4_flow(uint16_t port_id, uint16_t rx_q,
pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
pattern[1].spec = &ip_spec;
pattern[1].mask = &ip_mask;
+ /* >8 End of setting the second level of the pattern. */
- /* the final level must be always type end */
+ /* The final level must be always type end. 8< */
pattern[2].type = RTE_FLOW_ITEM_TYPE_END;
+ /* >8 End of final level must be always type end. */
+ /* Validate the rule and create it. 8< */
res = rte_flow_validate(port_id, &attr, pattern, action, error);
if (!res)
flow = rte_flow_create(port_id, &attr, pattern, action, error);
+ /* >8 End of validation the rule and create it. */
return flow;
}
+/* >8 End of function responsible for creating the flow rule. */
@@ -51,6 +51,7 @@ print_ether_addr(const char *what, struct rte_ether_addr *eth_addr)
printf("%s%s", what, buf);
}
+/* Main_loop for flow filtering. 8< */
static int
main_loop(void)
{
@@ -62,6 +63,7 @@ main_loop(void)
uint16_t j;
int ret;
+ /* Reading the packets from all queues. 8< */
while (!force_quit) {
for (i = 0; i < nr_queues; i++) {
nb_rx = rte_eth_rx_burst(port_id,
@@ -85,6 +87,7 @@ main_loop(void)
}
}
}
+ /* >8 End of reading the packets from all queues. */
/* closing and releasing resources */
rte_flow_flush(port_id, &error);
@@ -95,6 +98,7 @@ main_loop(void)
rte_eth_dev_close(port_id);
return ret;
}
+/* >8 End of main_loop for flow filtering. */
#define CHECK_INTERVAL 1000 /* 100ms */
#define MAX_REPEAT_TIMES 90 /* 9s (90 * 100ms) in total */
@@ -121,11 +125,13 @@ assert_link_status(void)
rte_exit(EXIT_FAILURE, ":: error: link is still down\n");
}
+/* Port initialization used in flow filtering. 8< */
static void
init_port(void)
{
int ret;
uint16_t i;
+ /* Ethernet port configured with default settings. 8< */
struct rte_eth_conf port_conf = {
.rxmode = {
.split_hdr_size = 0,
@@ -162,6 +168,9 @@ init_port(void)
rxq_conf = dev_info.default_rxconf;
rxq_conf.offloads = port_conf.rxmode.offloads;
+ /* >8 End of ethernet port configured with default settings. */
+
+ /* Configuring number of RX and TX queues connected to single port. 8< */
for (i = 0; i < nr_queues; i++) {
ret = rte_eth_rx_queue_setup(port_id, i, 512,
rte_eth_dev_socket_id(port_id),
@@ -187,24 +196,30 @@ init_port(void)
ret, port_id);
}
}
+ /* >8 End of Configuring RX and TX queues connected to single port. */
+ /* Setting the RX port to promiscuous mode. 8< */
ret = rte_eth_promiscuous_enable(port_id);
if (ret != 0)
rte_exit(EXIT_FAILURE,
":: promiscuous mode enable failed: err=%s, port=%u\n",
rte_strerror(-ret), port_id);
+ /* >8 End of setting the RX port to promiscuous mode. */
+ /* Starting the port. 8< */
ret = rte_eth_dev_start(port_id);
if (ret < 0) {
rte_exit(EXIT_FAILURE,
"rte_eth_dev_start:err=%d, port=%u\n",
ret, port_id);
}
+ /* >8 End of starting the port. */
assert_link_status();
printf(":: initializing port: %d done\n", port_id);
}
+/* >8 End of Port initialization used in flow filtering. */
static void
signal_handler(int signum)
@@ -223,9 +238,11 @@ main(int argc, char **argv)
uint16_t nr_ports;
struct rte_flow_error error;
+ /* Initialize EAL. 8< */
ret = rte_eal_init(argc, argv);
if (ret < 0)
rte_exit(EXIT_FAILURE, ":: invalid EAL arguments\n");
+ /* >8 End of Initialization of EAL. */
force_quit = false;
signal(SIGINT, signal_handler);
@@ -239,26 +256,34 @@ main(int argc, char **argv)
printf(":: warn: %d ports detected, but we use only one: port %u\n",
nr_ports, port_id);
}
+ /* Allocates a mempool to hold the mbufs. 8< */
mbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", 4096, 128, 0,
RTE_MBUF_DEFAULT_BUF_SIZE,
rte_socket_id());
+ /* >8 End of allocating a mempool to hold the mbufs. */
if (mbuf_pool == NULL)
rte_exit(EXIT_FAILURE, "Cannot init mbuf pool\n");
+ /* Initializes all the ports using the user defined init_port(). 8< */
init_port();
+ /* >8 End of Initializing the ports using user defined init_port(). */
- /* create flow for send packet with */
+ /* Create flow for send packet with. 8< */
flow = generate_ipv4_flow(port_id, selected_queue,
SRC_IP, EMPTY_MASK,
DEST_IP, FULL_MASK, &error);
+ /* >8 End of create flow and the flow rule. */
if (!flow) {
printf("Flow can't be created %d message: %s\n",
error.type,
error.message ? error.message : "(no stated reason)");
rte_exit(EXIT_FAILURE, "error in creating flow");
}
+ /* >8 End of creating flow for send packet with. */
+ /* Launching main_loop(). 8< */
ret = main_loop();
+ /* >8 End of launching main_loop(). */
/* clean up the EAL */
rte_eal_cleanup();
@@ -15,6 +15,7 @@
#include <rte_lcore.h>
#include <rte_debug.h>
+/* Launch a function on lcore. 8< */
static int
lcore_hello(__rte_unused void *arg)
{
@@ -23,7 +24,9 @@ lcore_hello(__rte_unused void *arg)
printf("hello from core %u\n", lcore_id);
return 0;
}
+/* >8 End of launching function on lcore. */
+/* Initialization of Environment Abstraction Layer (EAL). 8< */
int
main(int argc, char **argv)
{
@@ -33,14 +36,18 @@ main(int argc, char **argv)
ret = rte_eal_init(argc, argv);
if (ret < 0)
rte_panic("Cannot init EAL\n");
+ /* >8 End of initialization of Environment Abstraction Layer */
- /* call lcore_hello() on every worker lcore */
+ /* Launches the function on each lcore. 8< */
RTE_LCORE_FOREACH_WORKER(lcore_id) {
+ /* Simpler equivalent. 8< */
rte_eal_remote_launch(lcore_hello, NULL, lcore_id);
+ /* >8 End of simpler equivalent. */
}
/* call it on main lcore too */
lcore_hello(NULL);
+ /* >8 End of launching the function on each lcore. */
rte_eal_mp_wait_lcore();
@@ -42,11 +42,13 @@ struct rxtx_port_config {
uint16_t ioat_ids[MAX_RX_QUEUES_COUNT];
};
+/* Configuring ports and number of assigned lcores in struct. 8< */
struct rxtx_transmission_config {
struct rxtx_port_config ports[RTE_MAX_ETHPORTS];
uint16_t nb_ports;
uint16_t nb_lcores;
};
+/* >8 End of configuration of ports and number of assigned lcores. */
/* per-port statistics struct */
struct ioat_port_statistics {
@@ -327,6 +329,7 @@ update_mac_addrs(struct rte_mbuf *m, uint32_t dest_portid)
	rte_ether_addr_copy(&ioat_ports_eth_addr[dest_portid], &eth->s_addr);
}
+/* Perform packet copy there is a user-defined function. 8< */
static inline void
pktmbuf_sw_copy(struct rte_mbuf *src, struct rte_mbuf *dst)
{
@@ -340,6 +343,7 @@ pktmbuf_sw_copy(struct rte_mbuf *src, struct rte_mbuf *dst)
rte_memcpy(rte_pktmbuf_mtod(dst, char *),
rte_pktmbuf_mtod(src, char *), src->data_len);
}
+/* >8 End of perform packet copy there is a user-defined function. */
static uint32_t
ioat_enqueue_packets(struct rte_mbuf **pkts,
@@ -380,7 +384,7 @@ ioat_enqueue_packets(struct rte_mbuf **pkts,
return ret;
}
-/* Receive packets on one port and enqueue to IOAT rawdev or rte_ring. */
+/* Receive packets on one port and enqueue to IOAT rawdev or rte_ring. 8< */
static void
ioat_rx_port(struct rxtx_port_config *rx_config)
{
@@ -436,8 +440,9 @@ ioat_rx_port(struct rxtx_port_config *rx_config)
(nb_rx - nb_enq);
}
}
+/* >8 End of receive packets on one port and enqueue to IOAT rawdev or rte_ring. */
-/* Transmit packets from IOAT rawdev/rte_ring for one port. */
+/* Transmit packets from IOAT rawdev/rte_ring for one port. 8< */
static void
ioat_tx_port(struct rxtx_port_config *tx_config)
{
@@ -488,6 +493,7 @@ ioat_tx_port(struct rxtx_port_config *tx_config)
nb_dq - nb_tx);
}
}
+/* >8 End of transmitting packets from IOAT. */
/* Main rx processing loop for IOAT rawdev. */
static void
@@ -536,6 +542,7 @@ rxtx_main_loop(void)
}
}
+/* Start processing for each lcore. 8< */
static void start_forwarding_cores(void)
{
uint32_t lcore_id = rte_lcore_id();
@@ -557,6 +564,7 @@ static void start_forwarding_cores(void)
lcore_id);
}
}
+/* >8 End of starting to processfor each lcore. */
/* Display usage */
static void
@@ -725,6 +733,7 @@ check_link_status(uint32_t port_mask)
return link_status;
}
+/* Configuration of device. 8< */
static void
configure_rawdev_queue(uint32_t dev_id)
{
@@ -743,7 +752,9 @@ configure_rawdev_queue(uint32_t dev_id)
"Error with rte_rawdev_start()\n");
}
}
+/* >8 End of configuration of device. */
+/* Using IOAT rawdev API functions. 8< */
static void
assign_rawdevs(void)
{
@@ -774,7 +785,9 @@ assign_rawdevs(void)
nb_rawdev, cfg.nb_ports * cfg.ports[0].nb_queues);
RTE_LOG(INFO, IOAT, "Number of used rawdevs: %u.\n", nb_rawdev);
}
+/* >8 End of using IOAT rawdev API functions. */
+/* Assign ring structures for packet exchanging. 8< */
static void
assign_rings(void)
{
@@ -794,6 +807,7 @@ assign_rings(void)
rte_strerror(rte_errno));
}
}
+/* >8 End of assigning ring structures for packet exchanging. */
/*
* Initializes a given port using global settings and with the RX buffers
@@ -802,7 +816,7 @@ assign_rings(void)
static inline void
port_init(uint16_t portid, struct rte_mempool *mbuf_pool, uint16_t nb_queues)
{
- /* configuring port to use RSS for multiple RX queues */
+ /* Configuring port to use RSS for multiple RX queues. 8< */
static const struct rte_eth_conf port_conf = {
.rxmode = {
.mq_mode = ETH_MQ_RX_RSS,
@@ -815,6 +829,7 @@ port_init(uint16_t portid, struct rte_mempool *mbuf_pool, uint16_t nb_queues)
}
}
};
+ /* >8 End of configuring port to use RSS for multiple RX queues. */
struct rte_eth_rxconf rxq_conf;
struct rte_eth_txconf txq_conf;
@@ -898,14 +913,17 @@ port_init(uint16_t portid, struct rte_mempool *mbuf_pool, uint16_t nb_queues)
"Cannot set error callback for tx buffer on port %u\n",
portid);
- /* Start device */
+ /* Start device. 8< */
ret = rte_eth_dev_start(portid);
if (ret < 0)
rte_exit(EXIT_FAILURE,
"rte_eth_dev_start:err=%d, port=%u\n",
ret, portid);
+ /* >8 End of starting device. */
+ /* RX port is set in promiscuous mode. 8< */
rte_eth_promiscuous_enable(portid);
+ /* >8 End of RX port is set in promiscuous mode. */
printf("Port %u, MAC address: %02X:%02X:%02X:%02X:%02X:%02X\n\n",
portid,
@@ -938,10 +956,11 @@ main(int argc, char **argv)
uint32_t i;
unsigned int nb_mbufs;
- /* Init EAL */
+ /* Init EAL. 8< */
ret = rte_eal_init(argc, argv);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n");
+ /* >8 End of init EAL. */
argc -= ret;
argv += ret;
@@ -958,6 +977,7 @@ main(int argc, char **argv)
if (ret < 0)
rte_exit(EXIT_FAILURE, "Invalid IOAT arguments\n");
+ /* Allocates mempool to hold the mbufs. 8< */
nb_mbufs = RTE_MAX(nb_ports * (nb_queues * (nb_rxd + nb_txd +
4 * MAX_PKT_BURST) + rte_lcore_count() * MEMPOOL_CACHE_SIZE),
MIN_POOL_SIZE);
@@ -968,15 +988,18 @@ main(int argc, char **argv)
rte_socket_id());
if (ioat_pktmbuf_pool == NULL)
rte_exit(EXIT_FAILURE, "Cannot init mbuf pool\n");
+ /* >8 End of allocates mempool to hold the mbufs. */
- /* Initialise each port */
+ /* Initialize each port. 8< */
cfg.nb_ports = 0;
RTE_ETH_FOREACH_DEV(portid)
port_init(portid, ioat_pktmbuf_pool, nb_queues);
+ /* >8 End of initializing each port. */
/* Initialize port xstats */
memset(&port_statistics, 0, sizeof(port_statistics));
+ /* Assigning each port resources. 8< */
while (!check_link_status(ioat_enabled_port_mask) && !force_quit)
sleep(1);
@@ -990,6 +1013,7 @@ main(int argc, char **argv)
assign_rawdevs();
else /* copy_mode == COPY_MODE_SW_NUM */
assign_rings();
+ /* >8 End of assigning each port resources. */
start_forwarding_cores();
/* main core prints stats while other cores forward */
@@ -168,6 +168,7 @@ struct l3fwd_ipv4_route {
uint8_t if_out;
};
+/* Default l3fwd_ipv4_route_array table. 8< */
struct l3fwd_ipv4_route l3fwd_ipv4_route_array[] = {
{RTE_IPV4(100,10,0,0), 16, 0},
{RTE_IPV4(100,20,0,0), 16, 1},
@@ -178,6 +179,7 @@ struct l3fwd_ipv4_route l3fwd_ipv4_route_array[] = {
{RTE_IPV4(100,70,0,0), 16, 6},
{RTE_IPV4(100,80,0,0), 16, 7},
};
+/* >8 End of default l3fwd_ipv4_route_array table */
/*
* IPv6 forwarding table
@@ -189,6 +191,7 @@ struct l3fwd_ipv6_route {
uint8_t if_out;
};
+/* Default l3fwd_ipv6_route_array table. 8< */
static struct l3fwd_ipv6_route l3fwd_ipv6_route_array[] = {
{{1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 0},
{{2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 1},
@@ -199,6 +202,7 @@ static struct l3fwd_ipv6_route l3fwd_ipv6_route_array[] = {
{{7,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 6},
{{8,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 7},
};
+/* >8 End of default l3fwd_ipv6_route_array table. */
#define LPM_MAX_RULES 1024
#define LPM6_MAX_RULES 1024
@@ -189,6 +189,7 @@ struct l3fwd_ipv4_route {
uint8_t if_out;
};
+/* Default l3fwd_ipv4_route_array table. 8< */
struct l3fwd_ipv4_route l3fwd_ipv4_route_array[] = {
{RTE_IPV4(100,10,0,0), 16, 0},
{RTE_IPV4(100,20,0,0), 16, 1},
@@ -199,6 +200,7 @@ struct l3fwd_ipv4_route l3fwd_ipv4_route_array[] = {
{RTE_IPV4(100,70,0,0), 16, 6},
{RTE_IPV4(100,80,0,0), 16, 7},
};
+/* >8 End of default l3fwd_ipv4_route_array table. */
/*
* IPv6 forwarding table
@@ -210,6 +212,7 @@ struct l3fwd_ipv6_route {
uint8_t if_out;
};
+/* Default l3fwd_ipv6_route_array table. 8< */
static struct l3fwd_ipv6_route l3fwd_ipv6_route_array[] = {
{{1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 0},
{{2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 1},
@@ -220,6 +223,7 @@ static struct l3fwd_ipv6_route l3fwd_ipv6_route_array[] = {
{{7,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 6},
{{8,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 7},
};
+/* >8 End of default l3fwd_ipv6_route_array table. */
#define LPM_MAX_RULES 1024
#define LPM6_MAX_RULES 1024
@@ -856,6 +860,7 @@ setup_queue_tbl(struct rx_queue *rxq, uint32_t lcore, uint32_t queue)
if (socket == SOCKET_ID_ANY)
socket = 0;
+ /* Each table entry holds information about packet fragmentation. 8< */
frag_cycles = (rte_get_tsc_hz() + MS_PER_S - 1) / MS_PER_S *
max_flow_ttl;
@@ -867,6 +872,7 @@ setup_queue_tbl(struct rx_queue *rxq, uint32_t lcore, uint32_t queue)
max_flow_num, lcore, queue);
return -1;
}
+ /* >8 End of holding packet fragmentation. */
/*
* At any given moment up to <max_flow_num * (MAX_FRAG_NUM)>
@@ -874,6 +880,7 @@ setup_queue_tbl(struct rx_queue *rxq, uint32_t lcore, uint32_t queue)
* Plus, each TX queue can hold up to <max_flow_num> packets.
*/
+ /* mbufs stored int the gragment table. 8< */
nb_mbuf = RTE_MAX(max_flow_num, 2UL * MAX_PKT_BURST) * MAX_FRAG_NUM;
nb_mbuf *= (port_conf.rxmode.max_rx_pkt_len + BUF_SIZE - 1) / BUF_SIZE;
nb_mbuf *= 2; /* ipv4 and ipv6 */
@@ -890,6 +897,7 @@ setup_queue_tbl(struct rx_queue *rxq, uint32_t lcore, uint32_t queue)
"rte_pktmbuf_pool_create(%s) failed", buf);
return -1;
}
+ /* >8 End of mbufs stored int the fragmentation table. */
return 0;
}
@@ -68,8 +68,11 @@
* by placing the low-order 23-bits of the IP address into the low-order
* 23 bits of the Ethernet multicast address 01-00-5E-00-00-00 (hex)."
*/
+
+/* Construct Ethernet multicast address from IPv4 multicast Address. 8< */
#define ETHER_ADDR_FOR_IPV4_MCAST(x) \
(rte_cpu_to_be_64(0x01005e000000ULL | ((x) & 0x7fffff)) >> 16)
+/* >8 End of Construction of multicast address from IPv4 multicast address. */
/*
* Configurable number of RX/TX ring descriptors
@@ -176,7 +179,7 @@ send_burst(struct lcore_queue_conf *qconf, uint16_t port)
qconf->tx_mbufs[port].len = 0;
}
-/* Get number of bits set. */
+/* Get number of bits set. 8< */
static inline uint32_t
bitcnt(uint32_t v)
{
@@ -187,6 +190,7 @@ bitcnt(uint32_t v)
return n;
}
+/* >8 End of getting number of bits set. */
/**
* Create the output multicast packet based on the given input packet.
@@ -231,6 +235,8 @@ bitcnt(uint32_t v)
* - The pointer to the new outgoing packet.
* - NULL if operation failed.
*/
+
+/* mcast_out_pkt 8< */
static inline struct rte_mbuf *
mcast_out_pkt(struct rte_mbuf *pkt, int use_clone)
{
@@ -257,11 +263,14 @@ mcast_out_pkt(struct rte_mbuf *pkt, int use_clone)
__rte_mbuf_sanity_check(hdr, 1);
return hdr;
}
+/* >8 End of mcast_out_kt. */
/*
* Write new Ethernet header to the outgoing packet,
* and put it into the outgoing queue for the given port.
*/
+
+/* Write new Ethernet header to outgoing packets. 8< */
static inline void
mcast_send_pkt(struct rte_mbuf *pkt, struct rte_ether_addr *dest_addr,
struct lcore_queue_conf *qconf, uint16_t port)
@@ -287,6 +296,7 @@ mcast_send_pkt(struct rte_mbuf *pkt, struct rte_ether_addr *dest_addr,
if (unlikely(MAX_PKT_BURST == len))
send_burst(qconf, port);
}
+/* >8 End of writing new Ethernet headers. */
/* Multicast forward of the input packet */
static inline void
@@ -302,38 +312,45 @@ mcast_forward(struct rte_mbuf *m, struct lcore_queue_conf *qconf)
struct rte_ether_addr as_addr;
} dst_eth_addr;
- /* Remove the Ethernet header from the input packet */
+ /* Remove the Ethernet header from the input packet. 8< */
iphdr = (struct rte_ipv4_hdr *)
rte_pktmbuf_adj(m, (uint16_t)sizeof(struct rte_ether_hdr));
RTE_ASSERT(iphdr != NULL);
dest_addr = rte_be_to_cpu_32(iphdr->dst_addr);
+ /* >8 End of removing the Ethernet header from the input packet. */
/*
* Check that it is a valid multicast address and
* we have some active ports assigned to it.
*/
+
+ /* Check valid multicast address. 8< */
if (!RTE_IS_IPV4_MCAST(dest_addr) ||
(hash = rte_fbk_hash_lookup(mcast_hash, dest_addr)) <= 0 ||
(port_mask = hash & enabled_port_mask) == 0) {
rte_pktmbuf_free(m);
return;
}
+ /* >8 End of valid multicast address check. */
/* Calculate number of destination ports. */
port_num = bitcnt(port_mask);
- /* Should we use rte_pktmbuf_clone() or not. */
+ /* Should we use rte_pktmbuf_clone() or not. 8< */
use_clone = (port_num <= MCAST_CLONE_PORTS &&
m->nb_segs <= MCAST_CLONE_SEGS);
+ /* >8 End of using rte_pktmbuf_clone(). */
/* Mark all packet's segments as referenced port_num times */
if (use_clone == 0)
rte_pktmbuf_refcnt_update(m, (uint16_t)port_num);
- /* construct destination ethernet address */
+ /* Construct destination ethernet address. 8< */
dst_eth_addr.as_int = ETHER_ADDR_FOR_IPV4_MCAST(dest_addr);
+ /* >8 End of constructing destination ethernet address. */
+ /* Packets dispatched to destination ports. 8< */
for (port = 0; use_clone != port_mask; port_mask >>= 1, port++) {
/* Prepare output packet and send it out. */
@@ -345,6 +362,7 @@ mcast_forward(struct rte_mbuf *m, struct lcore_queue_conf *qconf)
rte_pktmbuf_free(m);
}
}
+ /* >8 End of packets dispatched to destination ports. */
/*
 * If we are making clone packets, then, for the last destination port,
@@ -540,6 +558,7 @@ print_ethaddr(const char *name, struct rte_ether_addr *eth_addr)
printf("%s%s", name, buf);
}
+/* Hash object is created and loaded. 8< */
static int
init_mcast_hash(void)
{
@@ -561,6 +580,7 @@ init_mcast_hash(void)
return 0;
}
+/* >8 End of hash object is created and loaded. */
/* Check the link status of all ports in up to 9s, and print them finally */
static void
@@ -647,7 +667,7 @@ main(int argc, char **argv)
if (ret < 0)
rte_exit(EXIT_FAILURE, "Invalid IPV4_MULTICAST parameters\n");
- /* create the mbuf pools */
+ /* Create the mbuf pools. 8< */
packet_pool = rte_pktmbuf_pool_create("packet_pool", NB_PKT_MBUF, 32,
0, PKT_MBUF_DATA_SIZE, rte_socket_id());
@@ -665,6 +685,7 @@ main(int argc, char **argv)
if (clone_pool == NULL)
rte_exit(EXIT_FAILURE, "Cannot init clone mbuf pool\n");
+ /* >8 End of create mbuf pools. */
nb_ports = rte_eth_dev_count_avail();
if (nb_ports == 0)
@@ -158,10 +158,11 @@ main(int argc, char *argv[])
unsigned nb_ports;
uint16_t portid;
- /* Initialize the Environment Abstraction Layer (EAL). */
+ /* Initialize the Environment Abstraction Layer (EAL). 8< */
int ret = rte_eal_init(argc, argv);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
+ /* >8 End of initializion the Environment Abstraction Layer (EAL). */
argc -= ret;
argv += ret;
@@ -170,9 +171,12 @@ main(int argc, char *argv[])
* Initialize the PQoS library and configure CAT.
* Please see l2fwd-cat documentation for more info.
*/
+
+ /* Initialize the PQoS. 8< */
ret = cat_init(argc, argv);
if (ret < 0)
rte_exit(EXIT_FAILURE, "PQOS: L3CA init failed!\n");
+ /* >8 End of initialization of PQoS. */
argc -= ret;
argv += ret;
@@ -338,6 +338,7 @@ print_stats(void)
fflush(stdout);
}
+/* l2fwd_crypto_send_burst 8< */
static int
l2fwd_crypto_send_burst(struct lcore_queue_conf *qconf, unsigned n,
struct l2fwd_crypto_params *cparams)
@@ -362,7 +363,9 @@ l2fwd_crypto_send_burst(struct lcore_queue_conf *qconf, unsigned n,
return 0;
}
+/* >8 End of l2fwd_crypto_send_burst. */
+/* Crypto enqueue. 8< */
static int
l2fwd_crypto_enqueue(struct rte_crypto_op *op,
struct l2fwd_crypto_params *cparams)
@@ -386,6 +389,7 @@ l2fwd_crypto_enqueue(struct rte_crypto_op *op,
qconf->op_buf[cparams->dev_id].len = len;
return 0;
}
+/* >8 End of crypto enqueue. */
static int
l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
@@ -571,7 +575,7 @@ l2fwd_send_burst(struct lcore_queue_conf *qconf, unsigned n,
return 0;
}
-/* Enqueue packets for TX and prepare them to be sent */
+/* Enqueue packets for TX and prepare them to be sent. 8< */
static int
l2fwd_send_packet(struct rte_mbuf *m, uint16_t port)
{
@@ -594,6 +598,7 @@ l2fwd_send_packet(struct rte_mbuf *m, uint16_t port)
qconf->pkt_buf[port].len = len;
return 0;
}
+/* >8 End of Enqueuing packets for TX. */
static void
l2fwd_mac_updating(struct rte_mbuf *m, uint16_t dest_portid)
@@ -657,6 +662,7 @@ generate_random_key(uint8_t *key, unsigned length)
rte_exit(EXIT_FAILURE, "Failed to generate random key\n");
}
+/* Session is created and is later attached to the crypto operation. 8< */
static struct rte_cryptodev_sym_session *
initialize_crypto_session(struct l2fwd_crypto_options *options, uint8_t cdev_id)
{
@@ -695,6 +701,7 @@ initialize_crypto_session(struct l2fwd_crypto_options *options, uint8_t cdev_id)
return session;
}
+/* >8 End of creation of session. */
static void
l2fwd_crypto_options_print(struct l2fwd_crypto_options *options);
@@ -913,6 +920,7 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
port_statistics[portid].rx += nb_rx;
+ /* Allocate and fillcrypto operations. 8< */
if (nb_rx) {
/*
* If we can't allocate a crypto_ops, then drop
@@ -929,6 +937,7 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
nb_rx = 0;
}
+ /* >8 End of crypto operation allocated and filled. */
/* Enqueue packets from Crypto device*/
for (j = 0; j < nb_rx; j++) {
@@ -939,7 +948,7 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
}
}
- /* Dequeue packets from Crypto device */
+ /* Dequeue packets from Crypto device. 8< */
do {
nb_rx = rte_cryptodev_dequeue_burst(
cparams->dev_id, cparams->qp_id,
@@ -957,6 +966,7 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
options);
}
} while (nb_rx == MAX_PKT_BURST);
+ /* >8 End of dequeue packets from crypto device. */
}
}
}
@@ -2099,7 +2109,8 @@ check_capabilities(struct l2fwd_crypto_options *options, uint8_t cdev_id)
if (options->xform_chain == L2FWD_CRYPTO_CIPHER_HASH ||
options->xform_chain == L2FWD_CRYPTO_HASH_CIPHER ||
options->xform_chain == L2FWD_CRYPTO_CIPHER_ONLY) {
- /* Check if device supports cipher algo */
+
+ /* Check if device supports cipher algo. 8< */
cap = check_device_support_cipher_algo(options, &dev_info,
cdev_id);
if (cap == NULL)
@@ -2114,6 +2125,9 @@ check_capabilities(struct l2fwd_crypto_options *options, uint8_t cdev_id)
cdev_id);
return -1;
}
+ /* >8 End of check if device supports cipher algo. */
+
+ /* Check if capable cipher is supported. 8< */
/*
* Check if length of provided cipher key is supported
@@ -2158,6 +2172,7 @@ check_capabilities(struct l2fwd_crypto_options *options, uint8_t cdev_id)
return -1;
}
}
+ /* >8 End of checking if cipher is supported. */
}
/* Set auth parameters */
@@ -64,10 +64,12 @@ l2fwd_event_init_ports(struct l2fwd_resources *rsrc)
if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
local_port_conf.txmode.offloads |=
DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ /* Configure RX and TX queue. 8< */
ret = rte_eth_dev_configure(port_id, 1, 1, &local_port_conf);
if (ret < 0)
rte_panic("Cannot configure device: err=%d, port=%u\n",
ret, port_id);
+ /* >8 End of configuration RX and TX queue. */
ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd,
&nb_txd);
@@ -81,6 +83,7 @@ l2fwd_event_init_ports(struct l2fwd_resources *rsrc)
fflush(stdout);
rxq_conf = dev_info.default_rxconf;
rxq_conf.offloads = local_port_conf.rxmode.offloads;
+ /* Using lcore to poll one or several ports. 8< */
ret = rte_eth_rx_queue_setup(port_id, 0, nb_rxd,
rte_eth_dev_socket_id(port_id),
&rxq_conf,
@@ -89,7 +92,9 @@ l2fwd_event_init_ports(struct l2fwd_resources *rsrc)
rte_panic("rte_eth_rx_queue_setup:err=%d, port=%u\n",
ret, port_id);
- /* init one TX queue on each port */
+ /* >8 End of using lcore to poll one or several ports. */
+
+ /* Init one TX queue on each port. 8< */
fflush(stdout);
txq_conf = dev_info.default_txconf;
txq_conf.offloads = local_port_conf.txmode.offloads;
@@ -99,6 +104,7 @@ l2fwd_event_init_ports(struct l2fwd_resources *rsrc)
if (ret < 0)
rte_panic("rte_eth_tx_queue_setup:err=%d, port=%u\n",
ret, port_id);
+ /* >8 End of init one TX queue on each port. */
rte_eth_promiscuous_enable(port_id);
@@ -66,6 +66,7 @@ l2fwd_event_service_setup(struct l2fwd_resources *rsrc)
uint32_t service_id, caps;
int ret, i;
+ /* Running eventdev scheduler service on service core. 8< */
rte_event_dev_info_get(evt_rsrc->event_d_id, &evdev_info);
if (!(evdev_info.event_dev_cap & RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED)) {
ret = rte_event_dev_service_id_get(evt_rsrc->event_d_id,
@@ -74,7 +75,9 @@ l2fwd_event_service_setup(struct l2fwd_resources *rsrc)
rte_panic("Error in starting eventdev service\n");
l2fwd_event_service_enable(service_id);
}
+ /* >8 End of running eventdev scheduler service on service core. */
+ /* Gets service ID for RX/TX adapters. 8< */
for (i = 0; i < evt_rsrc->rx_adptr.nb_rx_adptr; i++) {
ret = rte_event_eth_rx_adapter_caps_get(evt_rsrc->event_d_id,
evt_rsrc->rx_adptr.rx_adptr[i], &caps);
@@ -104,6 +107,7 @@ l2fwd_event_service_setup(struct l2fwd_resources *rsrc)
evt_rsrc->tx_adptr.tx_adptr[i]);
l2fwd_event_service_enable(service_id);
}
+ /* >8 End of get service ID for RX/TX adapters. */
}
static void
@@ -244,7 +248,7 @@ l2fwd_event_loop_burst(struct l2fwd_resources *rsrc,
rte_lcore_id());
while (!rsrc->force_quit) {
- /* Read packet from eventdev */
+ /* Read packet from eventdev. 8< */
nb_rx = rte_event_dequeue_burst(event_d_id, port_id, ev,
deq_len, 0);
if (nb_rx == 0)
@@ -254,14 +258,17 @@ l2fwd_event_loop_burst(struct l2fwd_resources *rsrc,
l2fwd_event_fwd(rsrc, &ev[i], tx_q_id, timer_period,
flags);
}
+ /* >8 End of reading packets from eventdev. */
if (flags & L2FWD_EVENT_TX_ENQ) {
+ /* Forwarding to destination ports. 8< */
nb_tx = rte_event_enqueue_burst(event_d_id, port_id,
ev, nb_rx);
while (nb_tx < nb_rx && !rsrc->force_quit)
nb_tx += rte_event_enqueue_burst(event_d_id,
port_id, ev + nb_tx,
nb_rx - nb_tx);
+ /* >8 End of forwarding to destination ports. */
}
if (flags & L2FWD_EVENT_TX_DIRECT) {
@@ -20,12 +20,14 @@ static uint32_t
l2fwd_event_device_setup_generic(struct l2fwd_resources *rsrc)
{
struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
+ /* Configures event device as per below configuration. 8< */
struct rte_event_dev_config event_d_conf = {
.nb_events_limit = 4096,
.nb_event_queue_flows = 1024,
.nb_event_port_dequeue_depth = 128,
.nb_event_port_enqueue_depth = 128
};
+ /* >8 End of event device configuration. */
struct rte_event_dev_info dev_info;
const uint8_t event_d_id = 0; /* Always use first event device only */
uint32_t event_queue_cfg = 0;
@@ -97,6 +99,7 @@ l2fwd_event_port_setup_generic(struct l2fwd_resources *rsrc)
{
struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
uint8_t event_d_id = evt_rsrc->event_d_id;
+ /* Event port initialization. 8< */
struct rte_event_port_conf event_p_conf = {
.dequeue_depth = 32,
.enqueue_depth = 32,
@@ -149,6 +152,7 @@ l2fwd_event_port_setup_generic(struct l2fwd_resources *rsrc)
rte_panic("Error in linking event port %d to queues\n",
event_p_id);
evt_rsrc->evp.event_p_id[event_p_id] = event_p_id;
+ /* >8 End of event port initialization. */
}
/* init spinlock */
rte_spinlock_init(&evt_rsrc->evp.lock);
@@ -162,6 +166,7 @@ l2fwd_event_queue_setup_generic(struct l2fwd_resources *rsrc,
{
struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
uint8_t event_d_id = evt_rsrc->event_d_id;
+ /* Event queue initialization. 8< */
struct rte_event_queue_conf event_q_conf = {
.nb_atomic_flows = 1024,
.nb_atomic_order_sequences = 1024,
@@ -181,6 +186,7 @@ l2fwd_event_queue_setup_generic(struct l2fwd_resources *rsrc,
ret = rte_event_queue_default_conf_get(event_d_id, 0, &def_q_conf);
if (ret < 0)
rte_panic("Error to get default config of event queue\n");
+ /* >8 End of event queue initialization. */
if (def_q_conf.nb_atomic_flows < event_q_conf.nb_atomic_flows)
event_q_conf.nb_atomic_flows = def_q_conf.nb_atomic_flows;
@@ -294,6 +300,7 @@ l2fwd_rx_tx_adapter_setup_generic(struct l2fwd_resources *rsrc)
rte_service_set_runstate_mapped_check(service_id, 0);
evt_rsrc->tx_adptr.service_id = service_id;
+ /* Extra port created. 8< */
ret = rte_event_eth_tx_adapter_event_port_get(tx_adptr_id, &tx_port_id);
if (ret)
rte_panic("Failed to get Tx adapter port id: %d\n", ret);
@@ -305,6 +312,7 @@ l2fwd_rx_tx_adapter_setup_generic(struct l2fwd_resources *rsrc)
if (ret != 1)
rte_panic("Unable to link Tx adapter port to Tx queue:err=%d\n",
ret);
+ /* >8 End of extra port created. */
ret = rte_event_eth_tx_adapter_start(tx_adptr_id);
if (ret)
@@ -226,7 +226,7 @@ l2fwd_rx_tx_adapter_setup_internal_port(struct l2fwd_resources *rsrc)
rte_panic("Failed to allocate memery for Rx adapter\n");
}
-
+ /* Assigned ethernet port. 8< */
RTE_ETH_FOREACH_DEV(port_id) {
if ((rsrc->enabled_port_mask & (1 << port_id)) == 0)
continue;
@@ -286,6 +286,7 @@ l2fwd_rx_tx_adapter_setup_internal_port(struct l2fwd_resources *rsrc)
evt_rsrc->tx_adptr.tx_adptr[adapter_id] = adapter_id;
adapter_id++;
}
+ /* >8 End of assigned ethernet port. */
}
void
@@ -61,6 +61,7 @@ l2fwd_poll_main_loop(struct l2fwd_resources *rsrc)
while (!rsrc->force_quit) {
+ /* Draining TX queue in main loop. 8< */
cur_tsc = rte_rdtsc();
/*
@@ -79,10 +80,11 @@ l2fwd_poll_main_loop(struct l2fwd_resources *rsrc)
prev_tsc = cur_tsc;
}
+ /* >8 End of draining TX queue in main loop. */
- /*
- * Read packet from RX queues
- */
+ /* Reading ingress packets. 8< */
+
+ /* Read packet from RX queues */
for (i = 0; i < qconf->n_rx_port; i++) {
port_id = qconf->rx_port_list[i];
@@ -97,6 +99,7 @@ l2fwd_poll_main_loop(struct l2fwd_resources *rsrc)
l2fwd_poll_simple_forward(rsrc, m, port_id);
}
}
+ /* >8 End of reading ingress packets. */
}
}
@@ -210,7 +210,7 @@ l2fwd_event_parse_args(int argc, char **argv, struct l2fwd_resources *rsrc)
int option_index;
char **argvopt;
- /* reset l2fwd_dst_ports */
+ /* Reset l2fwd_dst_ports. 8< */
for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
rsrc->dst_ports[port_id] = UINT32_MAX;
@@ -289,6 +289,7 @@ l2fwd_event_parse_args(int argc, char **argv, struct l2fwd_resources *rsrc)
ret = optind-1;
optind = 1; /* reset getopt lib */
return ret;
+ /* >8 End of reset l2fwd_dst_ports. */
}
/*
@@ -563,7 +564,7 @@ main(int argc, char **argv)
uint16_t nb_ports;
int i, ret;
- /* init EAL */
+ /* Init EAL. 8< */
ret = rte_eal_init(argc, argv);
if (ret < 0)
rte_panic("Invalid EAL arguments\n");
@@ -579,6 +580,7 @@ main(int argc, char **argv)
ret = l2fwd_event_parse_args(argc, argv, rsrc);
if (ret < 0)
rte_panic("Invalid L2FWD arguments\n");
+ /* >8 End of init EAL. */
printf("MAC updating %s\n", rsrc->mac_updating ? "enabled" :
"disabled");
@@ -626,12 +628,13 @@ main(int argc, char **argv)
MAX_PKT_BURST + rte_lcore_count() *
MEMPOOL_CACHE_SIZE), 8192U);
- /* create the mbuf pool */
+ /* Create the mbuf pool. 8< */
rsrc->pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool",
nb_mbufs, MEMPOOL_CACHE_SIZE, 0,
RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
if (rsrc->pktmbuf_pool == NULL)
rte_panic("Cannot init mbuf pool\n");
+ /* >8 End of creation of mbuf pool. */
nb_ports_available = l2fwd_event_init_ports(rsrc);
if (!nb_ports_available)
@@ -67,6 +67,7 @@ static unsigned int l2fwd_rx_queue_per_lcore = 1;
#define MAX_RX_QUEUE_PER_LCORE 16
#define MAX_TX_QUEUE_PER_PORT 16
+/* List of queues to be polled for given lcore. 8< */
struct lcore_queue_conf {
unsigned n_rx_port;
unsigned rx_port_list[MAX_RX_QUEUE_PER_LCORE];
@@ -83,6 +84,7 @@ struct lcore_queue_conf {
rte_atomic16_t stats_read_pending;
rte_spinlock_t lock;
} __rte_cache_aligned;
+/* >8 End of list of queues to be polled for given lcore. */
struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];
@@ -335,6 +337,7 @@ show_stats_cb(__rte_unused void *param)
rte_eal_alarm_set(timer_period * US_PER_S, show_stats_cb, NULL);
}
+/* Start of l2fwd_simple_forward. 8< */
static void
l2fwd_simple_forward(struct rte_mbuf *m, unsigned portid)
{
@@ -359,6 +362,7 @@ l2fwd_simple_forward(struct rte_mbuf *m, unsigned portid)
if (sent)
port_statistics[dst_port].tx += sent;
}
+/* >8 End of l2fwd_simple_forward. */
static void
l2fwd_job_update_cb(struct rte_jobstats *job, int64_t result)
@@ -395,6 +399,7 @@ l2fwd_fwd_job(__rte_unused struct rte_timer *timer, void *arg)
/* Call rx burst 2 times. This allow rte_jobstats logic to see if this
* function must be called more frequently. */
+ /* Call rx burst 2 times. 8< */
total_nb_rx = rte_eth_rx_burst(portid, 0, pkts_burst,
MAX_PKT_BURST);
@@ -403,7 +408,9 @@ l2fwd_fwd_job(__rte_unused struct rte_timer *timer, void *arg)
rte_prefetch0(rte_pktmbuf_mtod(m, void *));
l2fwd_simple_forward(m, portid);
}
+ /* >8 End of call rx burst 2 times. */
+ /* Read second try. 8< */
if (total_nb_rx == MAX_PKT_BURST) {
const uint16_t nb_rx = rte_eth_rx_burst(portid, 0, pkts_burst,
MAX_PKT_BURST);
@@ -415,16 +422,19 @@ l2fwd_fwd_job(__rte_unused struct rte_timer *timer, void *arg)
l2fwd_simple_forward(m, portid);
}
}
+ /* >8 End of read second try. */
port_statistics[portid].rx += total_nb_rx;
- /* Adjust period time in which we are running here. */
+ /* Adjust period time in which we are running here. 8< */
if (rte_jobstats_finish(job, total_nb_rx) != 0) {
rte_timer_reset(&qconf->rx_timers[port_idx], job->period, PERIODICAL,
lcore_id, l2fwd_fwd_job, arg);
}
+ /* >8 End of adjust period time in which we are running. */
}
+/* Draining TX queue of each port. 8< */
static void
l2fwd_flush_job(__rte_unused struct rte_timer *timer, __rte_unused void *arg)
{
@@ -463,6 +473,7 @@ l2fwd_flush_job(__rte_unused struct rte_timer *timer, __rte_unused void *arg)
* in which it was called. */
rte_jobstats_finish(&qconf->flush_job, qconf->flush_job.target);
}
+/* >8 End of draining TX queue of each port. */
/* main processing loop */
static void
@@ -493,6 +504,7 @@ l2fwd_main_loop(void)
rte_jobstats_init(&qconf->idle_job, "idle", 0, 0, 0, 0);
+ /* Minimize impact of stats reading. 8< */
for (;;) {
rte_spinlock_lock(&qconf->lock);
@@ -535,6 +547,7 @@ l2fwd_main_loop(void)
rte_spinlock_unlock(&qconf->lock);
rte_pause();
}
+ /* >8 End of minimize impact of stats reading. */
}
static int
@@ -749,7 +762,7 @@ main(int argc, char **argv)
uint16_t portid, last_port;
uint8_t i;
- /* init EAL */
+ /* Init EAL. 8< */
ret = rte_eal_init(argc, argv);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n");
@@ -760,24 +773,25 @@ main(int argc, char **argv)
ret = l2fwd_parse_args(argc, argv);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Invalid L2FWD arguments\n");
+ /* >8 End of init EAL. */
rte_timer_subsystem_init();
/* fetch default timer frequency. */
hz = rte_get_timer_hz();
- /* create the mbuf pool */
+ /* Create the mbuf pool. 8< */
l2fwd_pktmbuf_pool =
rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF, 32,
0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
if (l2fwd_pktmbuf_pool == NULL)
rte_exit(EXIT_FAILURE, "Cannot init mbuf pool\n");
-
+ /* >8 End of creation of mbuf pool. */
nb_ports = rte_eth_dev_count_avail();
if (nb_ports == 0)
rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");
- /* reset l2fwd_dst_ports */
+ /* Reset l2fwd_dst_ports. 8< */
for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++)
l2fwd_dst_ports[portid] = 0;
last_port = 0;
@@ -798,6 +812,7 @@ main(int argc, char **argv)
nb_ports_in_mask++;
}
+ /* >8 End of reset l2fwd_dst_ports. */
if (nb_ports_in_mask % 2) {
printf("Notice: odd number of ports in portmask.\n");
l2fwd_dst_ports[last_port] = last_port;
@@ -857,10 +872,12 @@ main(int argc, char **argv)
if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
local_port_conf.txmode.offloads |=
DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ /* Configure the RX and TX queues. 8< */
ret = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%u\n",
ret, portid);
+ /* >8 End of configuring the RX and TX queues. */
ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
&nb_txd);
@@ -880,6 +897,7 @@ main(int argc, char **argv)
fflush(stdout);
rxq_conf = dev_info.default_rxconf;
rxq_conf.offloads = local_port_conf.rxmode.offloads;
+ /* RX queue initialization. 8< */
ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
rte_eth_dev_socket_id(portid),
&rxq_conf,
@@ -887,8 +905,9 @@ main(int argc, char **argv)
if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup:err=%d, port=%u\n",
ret, portid);
+ /* >8 End of RX queue initialization. */
- /* init one TX queue on each port */
+ /* Init one TX queue on each port. 8< */
txq_conf = dev_info.default_txconf;
txq_conf.offloads = local_port_conf.txmode.offloads;
fflush(stdout);
@@ -899,6 +918,7 @@ main(int argc, char **argv)
rte_exit(EXIT_FAILURE,
"rte_eth_tx_queue_setup:err=%d, port=%u\n",
ret, portid);
+ /* >8 End of init one TX queue on each port. */
/* Initialize TX buffers */
tx_buffer[portid] = rte_zmalloc_socket("tx_buffer",
@@ -971,9 +991,11 @@ main(int argc, char **argv)
lcore_id);
continue;
}
- /* Add flush job.
- * Set fixed period by setting min = max = initial period. Set target to
- * zero as it is irrelevant for this job. */
+ /* Add flush job. 8< */
+
+ /* Set fixed period by setting min = max = initial period. Set target to
+ * zero as it is irrelevant for this job.
+ */
rte_jobstats_init(&qconf->flush_job, "flush", drain_tsc, drain_tsc,
drain_tsc, 0);
@@ -985,6 +1007,7 @@ main(int argc, char **argv)
rte_exit(1, "Failed to reset flush job timer for lcore %u: %s",
lcore_id, rte_strerror(-ret));
}
+ /* >8 End of add flush job. */
for (i = 0; i < qconf->n_rx_port; i++) {
struct rte_jobstats *job = &qconf->port_fwd_jobs[i];
@@ -993,9 +1016,11 @@ main(int argc, char **argv)
printf("Setting forward job for port %u\n", portid);
snprintf(name, RTE_DIM(name), "port %u fwd", portid);
- /* Setup forward job.
- * Set min, max and initial period. Set target to MAX_PKT_BURST as
- * this is desired optimal RX/TX burst size. */
+ /* Setup forward job. 8< */
+
+ /* Set min, max and initial period. Set target to MAX_PKT_BURST as
+ * this is desired optimal RX/TX burst size.
+ */
rte_jobstats_init(job, name, 0, drain_tsc, 0, MAX_PKT_BURST);
rte_jobstats_set_update_period_function(job, l2fwd_job_update_cb);
@@ -1007,6 +1032,7 @@ main(int argc, char **argv)
rte_exit(1, "Failed to reset lcore %u port %u job timer: %s",
lcore_id, qconf->rx_port_list[i], rte_strerror(-ret));
}
+ /* >8 End of forward job. */
}
}
@@ -227,7 +227,7 @@ l2fwd_main_loop(void)
uint64_t tsc_lifetime = (rand()&0x07) * rte_get_tsc_hz();
while (!terminate_signal_received) {
- /* Keepalive heartbeat */
+ /* Keepalive heartbeat. 8< */
rte_keepalive_mark_alive(rte_global_keepalive_info);
cur_tsc = rte_rdtsc();
@@ -238,6 +238,7 @@ l2fwd_main_loop(void)
*/
if (check_period > 0 && cur_tsc - tsc_initial > tsc_lifetime)
break;
+ /* >8 End of keepalive heartbeat. */
/*
* TX burst queue drain
@@ -760,10 +761,12 @@ main(int argc, char **argv)
if (ka_shm == NULL)
rte_exit(EXIT_FAILURE,
"rte_keepalive_shm_create() failed");
+ /* Initialize keepalive functionality. 8< */
rte_global_keepalive_info =
rte_keepalive_create(&dead_core, ka_shm);
if (rte_global_keepalive_info == NULL)
rte_exit(EXIT_FAILURE, "init_keep_alive() failed");
+ /* >8 End of initializing keepalive functionality. */
rte_keepalive_register_relay_callback(rte_global_keepalive_info,
relay_core_state, ka_shm);
rte_timer_init(&hb_timer);
@@ -778,6 +781,7 @@ main(int argc, char **argv)
rte_exit(EXIT_FAILURE, "Keepalive setup failure.\n");
}
if (timer_period > 0) {
+ /* Issues the pings keepalive_dispatch_pings(). 8< */
if (rte_timer_reset(&stats_timer,
(timer_period * rte_get_timer_hz()) / 1000,
PERIODICAL,
@@ -785,6 +789,7 @@ main(int argc, char **argv)
&print_stats, NULL
) != 0 )
rte_exit(EXIT_FAILURE, "Stats setup failure.\n");
+ /* >8 End of issuing the pings keepalive_dispatch_pings(). */
}
/* launch per-lcore init on every worker lcore */
RTE_LCORE_FOREACH_WORKER(lcore_id) {
@@ -80,11 +80,13 @@ static unsigned int l2fwd_rx_queue_per_lcore = 1;
#define MAX_RX_QUEUE_PER_LCORE 16
#define MAX_TX_QUEUE_PER_PORT 16
+/* List of queues to be polled for a given lcore. 8< */
struct lcore_queue_conf {
unsigned n_rx_port;
unsigned rx_port_list[MAX_RX_QUEUE_PER_LCORE];
} __rte_cache_aligned;
struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
+/* >8 End of list of queues to be polled for a given lcore. */
static struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];
@@ -175,6 +177,7 @@ l2fwd_mac_updating(struct rte_mbuf *m, unsigned dest_portid)
rte_ether_addr_copy(&l2fwd_ports_eth_addr[dest_portid], ð->s_addr);
}
+/* Simple forward. 8< */
static void
l2fwd_simple_forward(struct rte_mbuf *m, unsigned portid)
{
@@ -192,6 +195,7 @@ l2fwd_simple_forward(struct rte_mbuf *m, unsigned portid)
if (sent)
port_statistics[dst_port].tx += sent;
}
+/* >8 End of simple forward. */
/* main processing loop */
static void
@@ -231,6 +235,7 @@ l2fwd_main_loop(void)
while (!force_quit) {
+ /* Drains TX queue in its main loop. 8< */
cur_tsc = rte_rdtsc();
/*
@@ -270,10 +275,9 @@ l2fwd_main_loop(void)
prev_tsc = cur_tsc;
}
+ /* >8 End of draining TX queue. */
- /*
- * Read packet from RX queues
- */
+ /* Read packet from RX queues. 8< */
for (i = 0; i < qconf->n_rx_port; i++) {
portid = qconf->rx_port_list[i];
@@ -288,6 +292,7 @@ l2fwd_main_loop(void)
l2fwd_simple_forward(m, portid);
}
}
+ /* >8 End of read packet from RX queues. */
}
}
@@ -647,7 +652,7 @@ main(int argc, char **argv)
unsigned int nb_lcores = 0;
unsigned int nb_mbufs;
- /* init EAL */
+ /* Init EAL. 8< */
ret = rte_eal_init(argc, argv);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n");
@@ -662,6 +667,7 @@ main(int argc, char **argv)
ret = l2fwd_parse_args(argc, argv);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Invalid L2FWD arguments\n");
+ /* >8 End of init EAL. */
printf("MAC updating %s\n", mac_updating ? "enabled" : "disabled");
@@ -682,6 +688,8 @@ main(int argc, char **argv)
rte_exit(EXIT_FAILURE, "Invalid portmask; possible (0x%x)\n",
(1 << nb_ports) - 1);
+ /* Initialization of the driver. 8< */
+
/* reset l2fwd_dst_ports */
for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++)
l2fwd_dst_ports[portid] = 0;
@@ -717,6 +725,7 @@ main(int argc, char **argv)
l2fwd_dst_ports[last_port] = last_port;
}
}
+ /* >8 End of initialization of the driver. */
rx_lcore_id = 0;
qconf = NULL;
@@ -751,12 +760,13 @@ main(int argc, char **argv)
nb_mbufs = RTE_MAX(nb_ports * (nb_rxd + nb_txd + MAX_PKT_BURST +
nb_lcores * MEMPOOL_CACHE_SIZE), 8192U);
- /* create the mbuf pool */
+ /* Create the mbuf pool. 8< */
l2fwd_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", nb_mbufs,
MEMPOOL_CACHE_SIZE, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
rte_socket_id());
if (l2fwd_pktmbuf_pool == NULL)
rte_exit(EXIT_FAILURE, "Cannot init mbuf pool\n");
+ /* >8 End of create the mbuf pool. */
/* Initialise each port */
RTE_ETH_FOREACH_DEV(portid) {
@@ -785,10 +795,12 @@ main(int argc, char **argv)
if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
local_port_conf.txmode.offloads |=
DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ /* Configure the number of queues for a port. 8< */
ret = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%u\n",
ret, portid);
+ /* >8 End of configuration of the number of queues for a port. */
ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
&nb_txd);
@@ -808,6 +820,7 @@ main(int argc, char **argv)
fflush(stdout);
rxq_conf = dev_info.default_rxconf;
rxq_conf.offloads = local_port_conf.rxmode.offloads;
+ /* RX queue setup. 8< */
ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
rte_eth_dev_socket_id(portid),
&rxq_conf,
@@ -815,8 +828,9 @@ main(int argc, char **argv)
if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup:err=%d, port=%u\n",
ret, portid);
+ /* >8 End of RX queue setup. */
- /* init one TX queue on each port */
+ /* Init one TX queue on each port. 8< */
fflush(stdout);
txq_conf = dev_info.default_txconf;
txq_conf.offloads = local_port_conf.txmode.offloads;
@@ -826,6 +840,7 @@ main(int argc, char **argv)
if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup:err=%d, port=%u\n",
ret, portid);
+ /* >8 End of init one TX queue on each port. */
/* Initialize TX buffers */
tx_buffer[portid] = rte_zmalloc_socket("tx_buffer",
@@ -691,7 +691,7 @@ print_stats(void)
rte_graph_cluster_stats_destroy(stats);
}
-/* Main processing loop */
+/* Main processing loop. 8< */
static int
graph_main_loop(void *conf)
{
@@ -720,12 +720,14 @@ graph_main_loop(void *conf)
return 0;
}
+/* >8 End of main processing loop. */
int
main(int argc, char **argv)
{
/* Rewrite data of src and dst ether addr */
uint8_t rewrite_data[2 * sizeof(struct rte_ether_addr)];
+ /* Graph initialization. 8< */
static const char * const default_patterns[] = {
"ip4*",
"ethdev_tx-*",
@@ -782,7 +784,7 @@ main(int argc, char **argv)
nb_ports = rte_eth_dev_count_avail();
nb_lcores = rte_lcore_count();
- /* Initialize all ports */
+ /* Initialize all ports. 8< */
RTE_ETH_FOREACH_DEV(portid)
{
struct rte_eth_conf local_port_conf = port_conf;
@@ -962,6 +964,7 @@ main(int argc, char **argv)
/* Ethdev node config, skip rx queue mapping */
ret = rte_node_eth_config(ethdev_conf, nb_conf, nb_graphs);
+ /* >8 End of graph creation. */
if (ret)
rte_exit(EXIT_FAILURE, "rte_node_eth_config: err=%d\n", ret);
@@ -1037,6 +1040,7 @@ main(int argc, char **argv)
qconf->graph_id = graph_id;
qconf->graph = rte_graph_lookup(qconf->name);
+ /* >8 End of graph initialization. */
if (!qconf->graph)
rte_exit(EXIT_FAILURE,
"rte_graph_lookup(): graph %s not found\n",
@@ -1046,7 +1050,7 @@ main(int argc, char **argv)
memset(&rewrite_data, 0, sizeof(rewrite_data));
rewrite_len = sizeof(rewrite_data);
- /* Add route to ip4 graph infra */
+ /* Add route to ip4 graph infra. 8< */
for (i = 0; i < IPV4_L3FWD_LPM_NUM_ROUTES; i++) {
char route_str[INET6_ADDRSTRLEN * 4];
char abuf[INET6_ADDRSTRLEN];
@@ -1090,6 +1094,7 @@ main(int argc, char **argv)
RTE_LOG(INFO, L3FWD_GRAPH, "Added route %s, next_hop %u\n",
route_str, i);
}
+ /* >8 End of adding route to ip4 graph infra. */
/* Launch per-lcore init on every worker lcore */
rte_eal_mp_remote_launch(graph_main_loop, NULL, SKIP_MAIN);
@@ -911,7 +911,7 @@ static int event_register(struct lcore_conf *qconf)
return 0;
}
-/* main processing loop */
+/* Main processing loop. 8< */
static int main_intr_loop(__rte_unused void *dummy)
{
struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
@@ -1074,6 +1074,7 @@ static int main_intr_loop(__rte_unused void *dummy)
return 0;
}
+/* >8 End of main processing loop. */
/* main processing loop */
static int
@@ -2504,6 +2505,7 @@ mode_to_str(enum appmode mode)
}
}
+/* Power library initialized in the main routine. 8< */
int
main(int argc, char **argv)
{
@@ -2781,6 +2783,7 @@ main(int argc, char **argv)
}
}
}
+ /* >8 End of power library initialization. */
printf("\n");
@@ -243,6 +243,7 @@ em_mask_key(void *key, xmm_t mask)
#error No vector engine (SSE, NEON, ALTIVEC) available, check your toolchain
#endif
+/* Performing hash-based lookups. 8< */
static inline uint16_t
em_get_ipv4_dst_port(void *ipv4_hdr, uint16_t portid, void *lookup_struct)
{
@@ -264,6 +265,7 @@ em_get_ipv4_dst_port(void *ipv4_hdr, uint16_t portid, void *lookup_struct)
ret = rte_hash_lookup(ipv4_l3fwd_lookup_struct, (const void *)&key);
return (ret < 0) ? portid : ipv4_l3fwd_out_if[ret];
}
+/* >8 End of performing hash-based lookups. */
static inline uint16_t
em_get_ipv6_dst_port(void *ipv6_hdr, uint16_t portid, void *lookup_struct)
@@ -876,9 +878,7 @@ em_event_main_loop_tx_q_burst(__rte_unused void *dummy)
return 0;
}
-/*
- * Initialize exact match (hash) parameters.
- */
+/* Initialize exact match (hash) parameters. 8< */
void
setup_hash(const int socketid)
{
@@ -953,6 +953,7 @@ setup_hash(const int socketid)
}
}
}
+/* >8 End of initialization of hash parameters. */
/* Return ipv4/ipv6 em fwd lookup struct. */
void *
@@ -412,7 +412,7 @@ fib_event_main_loop_tx_q_burst(__rte_unused void *dummy)
return 0;
}
-/* Function to setup fib. */
+/* Function to setup fib. 8< */
void
setup_fib(const int socketid)
{
@@ -468,6 +468,7 @@ setup_fib(const int socketid)
ipv4_l3fwd_route_array[i].if_out);
}
}
+ /* >8 End of setup fib. */
/* Create the fib IPv6 table. */
snprintf(s, sizeof(s), "IPV6_L3FWD_FIB_%d", socketid);
@@ -40,6 +40,7 @@
static struct rte_lpm *ipv4_l3fwd_lpm_lookup_struct[NB_SOCKETS];
static struct rte_lpm6 *ipv6_l3fwd_lpm_lookup_struct[NB_SOCKETS];
+/* Performing LPM-based lookups. 8< */
static inline uint16_t
lpm_get_ipv4_dst_port(const struct rte_ipv4_hdr *ipv4_hdr,
uint16_t portid,
@@ -53,6 +54,7 @@ lpm_get_ipv4_dst_port(const struct rte_ipv4_hdr *ipv4_hdr,
else
return portid;
}
+/* >8 End of performing LPM-based lookups. */
static inline uint16_t
lpm_get_ipv6_dst_port(const struct rte_ipv6_hdr *ipv6_hdr,
@@ -66,15 +66,18 @@ static unsigned lsi_dst_ports[RTE_MAX_ETHPORTS] = {0};
#define MAX_RX_QUEUE_PER_LCORE 16
#define MAX_TX_QUEUE_PER_PORT 16
+/* List of queues must be polled for a given lcore. 8< */
struct lcore_queue_conf {
unsigned n_rx_port;
unsigned rx_port_list[MAX_RX_QUEUE_PER_LCORE];
unsigned tx_queue_id;
} __rte_cache_aligned;
struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
+/* >8 End of list of queues to be polled. */
struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];
+/* Global configuration stored in a static structure. 8< */
static struct rte_eth_conf port_conf = {
.rxmode = {
.split_hdr_size = 0,
@@ -86,6 +89,7 @@ static struct rte_eth_conf port_conf = {
.lsc = 1, /**< lsc interrupt feature enabled */
},
};
+/* >8 End of global configuration stored in a static structure. */
struct rte_mempool * lsi_pktmbuf_pool = NULL;
@@ -165,6 +169,7 @@ print_stats(void)
fflush(stdout);
}
+/* Replacing the source and destination MAC addresses. 8< */
static void
lsi_simple_forward(struct rte_mbuf *m, unsigned portid)
{
@@ -188,6 +193,7 @@ lsi_simple_forward(struct rte_mbuf *m, unsigned portid)
if (sent)
port_statistics[dst_port].tx += sent;
}
+/* >8 End of replacing the source and destination MAC addresses. */
/* main processing loop */
static void
@@ -226,6 +232,7 @@ lsi_main_loop(void)
while (1) {
+ /* Draining TX queue in its main loop. 8< */
cur_tsc = rte_rdtsc();
/*
@@ -265,10 +272,9 @@ lsi_main_loop(void)
prev_tsc = cur_tsc;
}
+ /* >8 End of draining TX queue in its main loop. */
- /*
- * Read packet from RX queues
- */
+ /* Read packet from RX queues. 8< */
for (i = 0; i < qconf->n_rx_port; i++) {
portid = qconf->rx_port_list[i];
@@ -283,6 +289,7 @@ lsi_main_loop(void)
lsi_simple_forward(m, portid);
}
}
+ /* >8 End of reading packet from RX queues. */
}
}
@@ -435,6 +442,8 @@ lsi_parse_args(int argc, char **argv)
* @return
* int.
*/
+
+/* lsi_event_callback 8< */
static int
lsi_event_callback(uint16_t port_id, enum rte_eth_event_type type, void *param,
void *ret_param)
@@ -459,6 +468,7 @@ lsi_event_callback(uint16_t port_id, enum rte_eth_event_type type, void *param,
return 0;
}
+/* >8 End of registering one or more callbacks. */
/* Check the link status of all ports in up to 9s, and print them finally */
static void
@@ -553,9 +563,7 @@ main(int argc, char **argv)
if (nb_ports == 0)
rte_panic("No Ethernet port - bye\n");
- /*
- * Each logical core is assigned a dedicated TX queue on each port.
- */
+ /* Each logical core is assigned a dedicated TX queue on each port. 8< */
for (portid = 0; portid < nb_ports; portid++) {
/* skip ports that are not enabled */
if ((lsi_enabled_port_mask & (1 << portid)) == 0)
@@ -571,6 +579,7 @@ main(int argc, char **argv)
nb_ports_in_mask++;
}
+ /* >8 End of assigning logical core. */
if (nb_ports_in_mask < 2 || nb_ports_in_mask % 2)
rte_exit(EXIT_FAILURE, "Current enabled port number is %u, "
"but it should be even and at least 2\n",
@@ -628,10 +637,12 @@ main(int argc, char **argv)
if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
local_port_conf.txmode.offloads |=
DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ /* Configure RX and TX queues. 8< */
ret = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%u\n",
ret, (unsigned) portid);
+ /* >8 End of configure RX and TX queues. */
ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
&nb_txd);
@@ -645,8 +656,11 @@ main(int argc, char **argv)
* lsc interrupt will be present, and below callback to
* be registered will never be called.
*/
+
+ /* RTE callback register. 8< */
rte_eth_dev_callback_register(portid,
RTE_ETH_EVENT_INTR_LSC, lsi_event_callback, NULL);
+ /* >8 End of registering lsi interrupt callback. */
ret = rte_eth_macaddr_get(portid,
&lsi_ports_eth_addr[portid]);
@@ -659,6 +673,7 @@ main(int argc, char **argv)
fflush(stdout);
rxq_conf = dev_info.default_rxconf;
rxq_conf.offloads = local_port_conf.rxmode.offloads;
+ /* RX queue initialization. 8< */
ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
rte_eth_dev_socket_id(portid),
&rxq_conf,
@@ -666,8 +681,9 @@ main(int argc, char **argv)
if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: err=%d, port=%u\n",
ret, (unsigned) portid);
+ /* >8 End of RX queue initialization. */
- /* init one TX queue logical core on each port */
+ /* init one TX queue logical core on each port. 8< */
fflush(stdout);
txq_conf = dev_info.default_txconf;
txq_conf.offloads = local_port_conf.txmode.offloads;
@@ -677,6 +693,7 @@ main(int argc, char **argv)
if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d,port=%u\n",
ret, (unsigned) portid);
+ /* >8 End of init one TX queue. */
/* Initialize TX buffers */
tx_buffer[portid] = rte_zmalloc_socket("tx_buffer",
@@ -87,6 +87,7 @@ main(int argc, char **argv)
if (ret < 0)
rte_exit(EXIT_FAILURE, "Cannot init EAL\n");
+ /* Start of ring structure. 8< */
if (rte_eal_process_type() == RTE_PROC_PRIMARY){
send_ring = rte_ring_create(_PRI_2_SEC, ring_size, rte_socket_id(), flags);
recv_ring = rte_ring_create(_SEC_2_PRI, ring_size, rte_socket_id(), flags);
@@ -99,6 +100,7 @@ main(int argc, char **argv)
send_ring = rte_ring_lookup(_SEC_2_PRI);
message_pool = rte_mempool_lookup(_MSG_POOL);
}
+ /* >8 End of ring structure. */
if (send_ring == NULL)
rte_exit(EXIT_FAILURE, "Problem getting sending ring\n");
if (recv_ring == NULL)
@@ -455,6 +455,7 @@ main(int argc, char **argv)
if (mp == NULL)
rte_exit(EXIT_FAILURE, "Cannot get memory pool for buffers\n");
+ /* Primary instance initialized. 8< */
if (num_ports & 1)
rte_exit(EXIT_FAILURE, "Application must use an even number of ports\n");
for(i = 0; i < num_ports; i++){
@@ -462,6 +463,7 @@ main(int argc, char **argv)
if (smp_port_init(ports[i], mp, (uint16_t)num_procs) < 0)
rte_exit(EXIT_FAILURE, "Error initialising ports\n");
}
+ /* >8 End of primary instance initialization. */
if (proc_type == RTE_PROC_PRIMARY)
check_all_ports_link_status((uint8_t)num_ports, (~0x0));
@@ -563,6 +563,8 @@ parse_drsp(struct ptpv2_data_slave_ordinary *ptp_data)
/* This function processes PTP packets, implementing slave PTP IEEE1588 L2
* functionality.
*/
+
+/* Parse ptp frames. 8< */
static void
parse_ptp_frames(uint16_t portid, struct rte_mbuf *m) {
struct ptp_header *ptp_hdr;
@@ -594,6 +596,7 @@ parse_ptp_frames(uint16_t portid, struct rte_mbuf *m) {
}
}
}
+/* >8 End of function processes PTP packets. */
/*
* The lcore main. This is the main thread that does the work, reading from an
@@ -612,7 +615,7 @@ lcore_main(void)
/* Run until the application is quit or killed. */
while (1) {
- /* Read packet from RX queues. */
+ /* Read packet from RX queues. 8< */
for (portid = 0; portid < ptp_enabled_port_nb; portid++) {
portid = ptp_enabled_ports[portid];
@@ -621,11 +624,14 @@ lcore_main(void)
if (likely(nb_rx == 0))
continue;
+ /* Packet is parsed to determine which type. 8< */
if (m->ol_flags & PKT_RX_IEEE1588_PTP)
parse_ptp_frames(portid, m);
+ /* >8 End of packet is parsed to determine which type. */
rte_pktmbuf_free(m);
}
+ /* >8 End of read packets from RX queues. */
}
}
@@ -732,32 +738,36 @@ main(int argc, char *argv[])
uint16_t portid;
- /* Initialize the Environment Abstraction Layer (EAL). */
+ /* Initialize the Environment Abstraction Layer (EAL). 8< */
int ret = rte_eal_init(argc, argv);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
+ /* >8 End of initialization of EAL. */
memset(&ptp_data, '\0', sizeof(struct ptpv2_data_slave_ordinary));
+ /* Parse specific arguments. 8< */
argc -= ret;
argv += ret;
ret = ptp_parse_args(argc, argv);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Error with PTP initialization\n");
+ /* >8 End of parsing specific arguments. */
/* Check that there is an even number of ports to send/receive on. */
nb_ports = rte_eth_dev_count_avail();
- /* Creates a new mempool in memory to hold the mbufs. */
+ /* Creates a new mempool in memory to hold the mbufs. 8< */
mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", NUM_MBUFS * nb_ports,
MBUF_CACHE_SIZE, 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
+ /* >8 End of a new mempool in memory to hold the mbufs. */
if (mbuf_pool == NULL)
rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
- /* Initialize all ports. */
+ /* Initialize all ports. 8< */
RTE_ETH_FOREACH_DEV(portid) {
if ((ptp_enabled_port_mask & (1 << portid)) != 0) {
if (port_init(portid, mbuf_pool) == 0) {
@@ -771,6 +781,7 @@ main(int argc, char *argv[])
} else
printf("Skipping disabled port %u\n", portid);
}
+ /* >8 End of initialization of all ports. */
if (ptp_enabled_port_nb == 0) {
rte_exit(EXIT_FAILURE,
@@ -14,10 +14,7 @@
#include <rte_mbuf.h>
#include <rte_meter.h>
-/*
- * Traffic metering configuration
- *
- */
+/* Traffic metering configuration. 8< */
#define APP_MODE_FWD 0
#define APP_MODE_SRTCM_COLOR_BLIND 1
#define APP_MODE_SRTCM_COLOR_AWARE 2
@@ -25,6 +22,7 @@
#define APP_MODE_TRTCM_COLOR_AWARE 4
#define APP_MODE APP_MODE_SRTCM_COLOR_BLIND
+/* >8 End of traffic metering configuration. */
#include "main.h"
@@ -88,6 +86,7 @@ static uint16_t port_tx;
static struct rte_mbuf *pkts_rx[PKT_RX_BURST_MAX];
struct rte_eth_dev_tx_buffer *tx_buffer;
+/* Traffic meter parameters are configured in the application. 8< */
struct rte_meter_srtcm_params app_srtcm_params = {
.cir = 1000000 * 46,
.cbs = 2048,
@@ -102,6 +101,7 @@ struct rte_meter_trtcm_params app_trtcm_params = {
.cbs = 2048,
.pbs = 2048
};
+/* >8 End of traffic meter parameters are configured in the application. */
struct rte_meter_trtcm_profile app_trtcm_profile;
@@ -12,12 +12,14 @@ enum policer_action {
DROP = 3,
};
+/* Policy implemented as a static structure. 8< */
enum policer_action policer_table[RTE_COLORS][RTE_COLORS] =
{
{ GREEN, RED, RED},
{ DROP, YELLOW, RED},
{ DROP, DROP, RED}
};
+/* >8 End of policy implemented as a static structure. */
#if APP_MODE == APP_MODE_FWD
@@ -57,6 +57,7 @@ int hw_timestamping;
#define TICKS_PER_CYCLE_SHIFT 16
static uint64_t ticks_per_cycle_mult;
+/* Callback added to the RX port and applied to packets. 8< */
static uint16_t
add_timestamps(uint16_t port __rte_unused, uint16_t qidx __rte_unused,
struct rte_mbuf **pkts, uint16_t nb_pkts,
@@ -69,7 +70,9 @@ add_timestamps(uint16_t port __rte_unused, uint16_t qidx __rte_unused,
*tsc_field(pkts[i]) = now;
return nb_pkts;
}
+/* >8 End of callback addition and application. */
+/* Callback is added to the TX port. 8< */
static uint16_t
calc_latency(uint16_t port, uint16_t qidx __rte_unused,
struct rte_mbuf **pkts, uint16_t nb_pkts, void *_ __rte_unused)
@@ -110,11 +113,14 @@ calc_latency(uint16_t port, uint16_t qidx __rte_unused,
}
return nb_pkts;
}
+/* >8 End of callback addition. */
/*
* Initialises a given port using global settings and with the rx buffers
* coming from the mbuf_pool passed as parameter
*/
+
+ /* Port initialization. 8< */
static inline int
port_init(uint16_t port, struct rte_mempool *mbuf_pool)
{
@@ -229,11 +235,14 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
if (retval != 0)
return retval;
+ /* RX and TX callbacks are added to the ports. 8< */
rte_eth_add_rx_callback(port, 0, add_timestamps, NULL);
rte_eth_add_tx_callback(port, 0, calc_latency, NULL);
+ /* >8 End of RX and TX callbacks. */
return 0;
}
+/* >8 End of port initialization. */
/*
* Main thread that does the work, reading from INPUT_PORT
@@ -189,6 +189,8 @@ configure_output_ports(const struct shared_info *info)
* the node will handle, which will be used to decide if packet
* is transmitted or dropped.
*/
+
+/* Creation of hash table. 8< */
static struct rte_hash *
create_hash_table(const struct shared_info *info)
{
@@ -243,6 +245,7 @@ populate_hash_table(const struct rte_hash *h, const struct shared_info *info)
printf("Hash table: Adding 0x%x keys\n", num_flows_node);
}
+/* >8 End of creation of hash table. */
/*
* This function performs routing of packets
@@ -263,6 +266,7 @@ transmit_packet(struct rte_mbuf *buf)
}
+/* Packets dequeued from the shared ring. 8< */
static inline void
handle_packets(struct rte_hash *h, struct rte_mbuf **bufs, uint16_t num_packets)
{
@@ -293,6 +297,7 @@ handle_packets(struct rte_hash *h, struct rte_mbuf **bufs, uint16_t num_packets)
}
}
}
+/* >8 End of packets dequeueing. */
/*
* Application main function - loops through
@@ -323,6 +328,7 @@ main(int argc, char *argv[])
if (rte_eth_dev_count_avail() == 0)
rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");
+ /* Attaching to the server process memory. 8< */
rx_ring = rte_ring_lookup(get_rx_queue_name(node_id));
if (rx_ring == NULL)
rte_exit(EXIT_FAILURE, "Cannot get RX ring - "
@@ -338,6 +344,7 @@ main(int argc, char *argv[])
info = mz->addr;
tx_stats = &(info->tx_stats[node_id]);
filter_stats = &(info->filter_stats[node_id]);
+ /* >8 End of attaching to the server process memory. */
configure_output_ports(info);
@@ -200,6 +200,8 @@ init_shm_rings(void)
* Create EFD table which will contain all the flows
* that will be distributed among the nodes
*/
+
+/* Create EFD table. 8< */
static void
create_efd_table(void)
{
@@ -236,6 +238,7 @@ populate_efd_table(void)
printf("EFD table: Adding 0x%x keys\n", num_flows);
}
+/* >8 End of creation of EFD table. */
/* Check the link status of all ports in up to 9s, and print them finally */
static void
@@ -99,6 +99,8 @@ get_printable_mac_addr(uint16_t port)
* thread in the server process, when the process is run with more
* than one lcore enabled.
*/
+
+/* Display recorded statistics. 8< */
static void
do_stats_display(void)
{
@@ -166,6 +168,7 @@ do_stats_display(void)
printf("\n");
}
+/* >8 End of displaying the recorded statistics. */
/*
* The function called from each non-main lcore used by the process.
@@ -212,6 +215,8 @@ clear_stats(void)
* send a burst of traffic to a node, assuming there are packets
* available to be sent to this node
*/
+
+/* Flush rx queue. 8< */
static void
flush_rx_queue(uint16_t node)
{
@@ -232,6 +237,7 @@ flush_rx_queue(uint16_t node)
cl_rx_buf[node].count = 0;
}
+/* >8 End of sending a burst of traffic to a node. */
/*
* marks a packet down to be sent to a particular node process
@@ -245,8 +251,10 @@ enqueue_rx_packet(uint8_t node, struct rte_mbuf *buf)
/*
* This function takes a group of packets and routes them
* individually to the node process. Very simply round-robins the packets
- * without checking any of the packet contents.
+ * without checking any of the packet contents. 8<
*/
+
+/* Processing packets. 8< */
static void
process_packets(uint32_t port_num __rte_unused, struct rte_mbuf *pkts[],
uint16_t rx_count, unsigned int socket_id)
@@ -288,6 +296,7 @@ process_packets(uint32_t port_num __rte_unused, struct rte_mbuf *pkts[],
for (i = 0; i < num_nodes; i++)
flush_rx_queue(i);
}
+/* >8 End of process_packets. */
/*
* Function called by the main lcore of the DPDK process.
@@ -176,6 +176,7 @@ main(int argc, char **argv)
for (i = 0; i < NUM_SERVICES; i++) {
services[i].callback_userdata = 0;
uint32_t id;
+ /* Register a service as an application. 8< */
ret = rte_service_component_register(&services[i], &id);
if (ret)
rte_exit(-1, "service register() failed");
@@ -198,6 +199,7 @@ main(int argc, char **argv)
ret = rte_service_runstate_set(id, 1);
if (ret)
return -ENOEXEC;
+ /* >8 End of registering a service as an application. */
}
i = 0;
@@ -17,11 +17,13 @@
#define MBUF_CACHE_SIZE 250
#define BURST_SIZE 32
+/* Configuration of ethernet ports. 8< */
static const struct rte_eth_conf port_conf_default = {
.rxmode = {
.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
},
};
+/* >8 End of configuration of ethernet ports. */
/* basicfwd.c: Basic DPDK skeleton forwarding example. */
@@ -29,6 +31,8 @@ static const struct rte_eth_conf port_conf_default = {
* Initializes a given port using global settings and with the RX buffers
* coming from the mbuf_pool passed as a parameter.
*/
+
+/* Main functional part of port initialization. 8< */
static inline int
port_init(uint16_t port, struct rte_mempool *mbuf_pool)
{
@@ -82,8 +86,9 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
return retval;
}
- /* Start the Ethernet port. */
+ /* Starting Ethernet port. 8< */
retval = rte_eth_dev_start(port);
+ /* >8 End of starting the Ethernet port. */
if (retval < 0)
return retval;
@@ -102,16 +107,20 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
/* Enable RX in promiscuous mode for the Ethernet device. */
retval = rte_eth_promiscuous_enable(port);
+ /* End of setting RX port in promiscuous mode. */
if (retval != 0)
return retval;
return 0;
}
+/* >8 End of main functional part of port initialization. */
/*
* The lcore main. This is the main thread that does the work, reading from
* an input port and writing to an output port.
*/
+
+ /* Basic forwarding application lcore. 8< */
static __rte_noreturn void
lcore_main(void)
{
@@ -132,7 +141,7 @@ lcore_main(void)
printf("\nCore %u forwarding packets. [Ctrl+C to quit]\n",
rte_lcore_id());
- /* Run until the application is quit or killed. */
+ /* Main work of application loop. 8< */
for (;;) {
/*
* Receive packets on a port and forward them on the paired
@@ -160,7 +169,9 @@ lcore_main(void)
}
}
}
+ /* >8 End of loop. */
}
+/* >8 End Basic forwarding application lcore. */
/*
* The main function, which does initialization and calls the per-lcore
@@ -173,10 +184,11 @@ main(int argc, char *argv[])
unsigned nb_ports;
uint16_t portid;
- /* Initialize the Environment Abstraction Layer (EAL). */
+ /* Initializing the Environment Abstraction Layer (EAL). 8< */
int ret = rte_eal_init(argc, argv);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
+ /* >8 End of initializing the Environment Abstraction Layer (EAL). */
argc -= ret;
argv += ret;
@@ -187,23 +199,28 @@ main(int argc, char *argv[])
rte_exit(EXIT_FAILURE, "Error: number of ports must be even\n");
/* Creates a new mempool in memory to hold the mbufs. */
+
+ /* Allocates mempool to hold the mbufs. 8< */
mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", NUM_MBUFS * nb_ports,
MBUF_CACHE_SIZE, 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
+ /* >8 End of allocating mempool to hold mbuf. */
if (mbuf_pool == NULL)
rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
- /* Initialize all ports. */
+ /* Initializing all ports. 8< */
RTE_ETH_FOREACH_DEV(portid)
if (port_init(portid, mbuf_pool) != 0)
rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu16 "\n",
portid);
+ /* >8 End of initializing all ports. */
if (rte_lcore_count() > 1)
printf("\nWARNING: Too many lcores enabled. Only 1 used.\n");
- /* Call lcore_main on the main core only. */
+ /* Call lcore_main on the main core only. Called on single lcore. 8< */
lcore_main();
+ /* >8 End of called on single lcore. */
/* clean up the EAL */
rte_eal_cleanup();
@@ -22,7 +22,7 @@ static uint64_t timer_resolution_cycles;
static struct rte_timer timer0;
static struct rte_timer timer1;
-/* timer0 callback */
+/* timer0 callback. 8< */
static void
timer0_cb(__rte_unused struct rte_timer *tim,
__rte_unused void *arg)
@@ -37,8 +37,9 @@ timer0_cb(__rte_unused struct rte_timer *tim,
if ((counter ++) == 20)
rte_timer_stop(tim);
}
+/* >8 End of timer0 callback. */
-/* timer1 callback */
+/* timer1 callback. 8< */
static void
timer1_cb(__rte_unused struct rte_timer *tim,
__rte_unused void *arg)
@@ -53,6 +54,7 @@ timer1_cb(__rte_unused struct rte_timer *tim,
lcore_id = rte_get_next_lcore(lcore_id, 0, 1);
rte_timer_reset(tim, hz/3, SINGLE, lcore_id, timer1_cb, NULL);
}
+/* >8 End of timer1 callback. */
static __rte_noreturn int
lcore_mainloop(__rte_unused void *arg)
@@ -63,6 +65,7 @@ lcore_mainloop(__rte_unused void *arg)
lcore_id = rte_lcore_id();
printf("Starting mainloop on core %u\n", lcore_id);
+ /* Main loop. 8< */
while (1) {
/*
* Call the timer handler on each core: as we don't need a
@@ -77,6 +80,7 @@ lcore_mainloop(__rte_unused void *arg)
prev_tsc = cur_tsc;
}
}
+ /* >8 End of main loop. */
}
int
@@ -86,22 +90,24 @@ main(int argc, char **argv)
uint64_t hz;
unsigned lcore_id;
- /* init EAL */
+ /* Init EAL. 8< */
ret = rte_eal_init(argc, argv);
if (ret < 0)
rte_panic("Cannot init EAL\n");
/* init RTE timer library */
rte_timer_subsystem_init();
+ /* >8 End of init EAL. */
- /* init timer structures */
+ /* Init timer structures. 8< */
rte_timer_init(&timer0);
rte_timer_init(&timer1);
+ /* >8 End of init timer structures. */
+ /* Load timer0, every second, on main lcore, reloaded automatically. 8< */
hz = rte_get_timer_hz();
timer_resolution_cycles = hz * 10 / 1000; /* around 10ms */
- /* load timer0, every second, on main lcore, reloaded automatically */
lcore_id = rte_lcore_id();
rte_timer_reset(&timer0, hz, PERIODICAL, lcore_id, timer0_cb, NULL);
@@ -109,13 +115,16 @@ main(int argc, char **argv)
lcore_id = rte_get_next_lcore(lcore_id, 0, 1);
rte_timer_reset(&timer1, hz/3, SINGLE, lcore_id, timer1_cb, NULL);
- /* call lcore_mainloop() on every worker lcore */
+ /* >8 End of two timers configured. */
+
+ /* Call lcore_mainloop() on every worker lcore. 8< */
RTE_LCORE_FOREACH_WORKER(lcore_id) {
rte_eal_remote_launch(lcore_mainloop, NULL, lcore_id);
}
/* call it on main lcore too */
(void) lcore_mainloop(NULL);
+ /* >8 End of call lcore_mainloop() on every worker lcore. */
/* clean up the EAL */
rte_eal_cleanup();
@@ -61,6 +61,8 @@ static uint32_t num_queues = 8;
static uint32_t num_pools = 8;
static uint8_t rss_enable;
+/* Default structure for VMDq. 8< */
+
/* empty vmdq configuration structure. Filled in programatically */
static const struct rte_eth_conf vmdq_conf_default = {
.rxmode = {
@@ -85,6 +87,7 @@ static const struct rte_eth_conf vmdq_conf_default = {
},
},
};
+/* >8 End of empty vmdq configuration structure. */
static unsigned lcore_ids[RTE_MAX_LCORE];
static uint16_t ports[RTE_MAX_ETHPORTS];
@@ -93,6 +96,7 @@ static unsigned num_ports; /**< The number of ports specified in command line */
/* array used for printing out statistics */
volatile unsigned long rxPackets[MAX_QUEUES] = {0};
+/* vlan_tags 8< */
const uint16_t vlan_tags[] = {
0, 1, 2, 3, 4, 5, 6, 7,
8, 9, 10, 11, 12, 13, 14, 15,
@@ -103,13 +107,19 @@ const uint16_t vlan_tags[] = {
48, 49, 50, 51, 52, 53, 54, 55,
56, 57, 58, 59, 60, 61, 62, 63,
};
+/* >8 End of vlan_tags. */
+
const uint16_t num_vlans = RTE_DIM(vlan_tags);
static uint16_t num_pf_queues, num_vmdq_queues;
static uint16_t vmdq_pool_base, vmdq_queue_base;
+
+/* Pool mac address template. 8< */
+
/* pool mac addr template, pool mac addr is like: 52 54 00 12 port# pool# */
static struct rte_ether_addr pool_addr_template = {
.addr_bytes = {0x52, 0x54, 0x00, 0x12, 0x00, 0x00}
};
+/* >8 End of mac addr template. */
/* ethernet addresses of ports */
static struct rte_ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];
@@ -125,6 +135,8 @@ static struct rte_ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];
* given above, and determine the queue number and pool map number according to
* valid pool number
*/
+
+ /* Building correct configuration for vmdq. 8< */
static inline int
get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_pools)
{
@@ -337,6 +349,7 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
return 0;
}
+/* >8 End of get_eth_conf. */
/* Check num_pools parameter and set it if OK*/
static int
@@ -66,7 +66,7 @@ static uint16_t num_queues, num_vmdq_queues;
static uint16_t vmdq_pool_base, vmdq_queue_base;
static uint8_t rss_enable;
-/* empty vmdq+dcb configuration structure. Filled in programatically */
+/* Empty vmdq+dcb configuration structure. Filled in programmatically. 8< */
static const struct rte_eth_conf vmdq_dcb_conf_default = {
.rxmode = {
.mq_mode = ETH_MQ_RX_VMDQ_DCB,
@@ -108,10 +108,12 @@ static const struct rte_eth_conf vmdq_dcb_conf_default = {
},
},
};
+/* >8 End of empty vmdq+dcb configuration structure. */
/* array used for printing out statistics */
volatile unsigned long rxPackets[MAX_QUEUES] = {0};
+/* Dividing up the possible user priority values. 8< */
const uint16_t vlan_tags[] = {
0, 1, 2, 3, 4, 5, 6, 7,
8, 9, 10, 11, 12, 13, 14, 15,
@@ -179,6 +181,7 @@ get_eth_conf(struct rte_eth_conf *eth_conf)
}
return 0;
}
+/* >8 End of dividing up the possible user priority values. */
/*
* Initialises a given port using global settings and with the rx buffers
@@ -349,7 +352,7 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
vmdq_ports_eth_addr[port].addr_bytes[4],
vmdq_ports_eth_addr[port].addr_bytes[5]);
- /* Set mac for each pool.*/
+ /* Set mac for each pool. 8< */
for (q = 0; q < num_pools; q++) {
struct rte_ether_addr mac;
@@ -368,6 +371,7 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
return retval;
}
}
+ /* >8 End of set mac for each pool. */
return 0;
}