@@ -366,6 +366,10 @@ static void cmd_help_long_parsed(void *parsed_result,
"csum show (port_id)\n"
" Display tx checksum offload configuration\n\n"
+ "csum txprep (on|off)\n"
+ " Enable tx preparation path in csum forward engine"
+ "\n\n"
+
"tso set (segsize) (portid)\n"
" Enable TCP Segmentation Offload in csum forward"
" engine.\n"
@@ -3528,6 +3532,50 @@ struct cmd_csum_tunnel_result {
},
};
+
+/* Enable/disable tx preparation path */
+struct cmd_csum_txprep_result {
+ cmdline_fixed_string_t csum;
+ cmdline_fixed_string_t parse;
+ cmdline_fixed_string_t onoff;
+};
+
+static void
+cmd_csum_txprep_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_csum_txprep_result *res = parsed_result;
+
+ if (!strcmp(res->onoff, "on"))
+ tx_prepare = 1;
+ else
+ tx_prepare = 0;
+}
+
+cmdline_parse_token_string_t cmd_csum_txprep_csum =
+ TOKEN_STRING_INITIALIZER(struct cmd_csum_txprep_result,
+ csum, "csum");
+cmdline_parse_token_string_t cmd_csum_txprep_parse =
+ TOKEN_STRING_INITIALIZER(struct cmd_csum_txprep_result,
+ parse, "txprep");
+cmdline_parse_token_string_t cmd_csum_txprep_onoff =
+ TOKEN_STRING_INITIALIZER(struct cmd_csum_txprep_result,
+ onoff, "on#off");
+
+cmdline_parse_inst_t cmd_csum_txprep = {
+ .f = cmd_csum_txprep_parsed,
+ .data = NULL,
+ .help_str = "csum txprep on|off: Enable/Disable tx preparation path "
+ "for csum engine",
+ .tokens = {
+ (void *)&cmd_csum_txprep_csum,
+ (void *)&cmd_csum_txprep_parse,
+ (void *)&cmd_csum_txprep_onoff,
+ NULL,
+ },
+};
+
/* *** ENABLE HARDWARE SEGMENTATION IN TX NON-TUNNELED PACKETS *** */
struct cmd_tso_set_result {
cmdline_fixed_string_t tso;
@@ -11518,6 +11566,7 @@ struct cmd_set_vf_mac_addr_result {
(cmdline_parse_inst_t *)&cmd_csum_set,
(cmdline_parse_inst_t *)&cmd_csum_show,
(cmdline_parse_inst_t *)&cmd_csum_tunnel,
+ (cmdline_parse_inst_t *)&cmd_csum_txprep,
(cmdline_parse_inst_t *)&cmd_tso_set,
(cmdline_parse_inst_t *)&cmd_tso_show,
(cmdline_parse_inst_t *)&cmd_tunnel_tso_set,
@@ -372,8 +372,10 @@ struct simple_gre_hdr {
udp_hdr->dgram_cksum = 0;
if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_UDP_CKSUM) {
ol_flags |= PKT_TX_UDP_CKSUM;
- udp_hdr->dgram_cksum = get_psd_sum(l3_hdr,
- info->ethertype, ol_flags);
+ if (!tx_prepare)
+ udp_hdr->dgram_cksum = get_psd_sum(
+ l3_hdr, info->ethertype,
+ ol_flags);
} else {
udp_hdr->dgram_cksum =
get_udptcp_checksum(l3_hdr, udp_hdr,
@@ -385,12 +387,15 @@ struct simple_gre_hdr {
tcp_hdr->cksum = 0;
if (tso_segsz) {
ol_flags |= PKT_TX_TCP_SEG;
- tcp_hdr->cksum = get_psd_sum(l3_hdr, info->ethertype,
- ol_flags);
+ if (!tx_prepare)
+ tcp_hdr->cksum = get_psd_sum(l3_hdr,
+ info->ethertype, ol_flags);
+
} else if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_TCP_CKSUM) {
ol_flags |= PKT_TX_TCP_CKSUM;
- tcp_hdr->cksum = get_psd_sum(l3_hdr, info->ethertype,
- ol_flags);
+ if (!tx_prepare)
+ tcp_hdr->cksum = get_psd_sum(l3_hdr,
+ info->ethertype, ol_flags);
} else {
tcp_hdr->cksum =
get_udptcp_checksum(l3_hdr, tcp_hdr,
@@ -648,6 +653,7 @@ struct simple_gre_hdr {
void *l3_hdr = NULL, *outer_l3_hdr = NULL; /* can be IPv4 or IPv6 */
uint16_t nb_rx;
uint16_t nb_tx;
+ uint16_t nb_prep;
uint16_t i;
uint64_t rx_ol_flags, tx_ol_flags;
uint16_t testpmd_ol_flags;
@@ -857,7 +863,20 @@ struct simple_gre_hdr {
printf("\n");
}
}
- nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
+
+ if (tx_prepare) {
+ nb_prep = rte_eth_tx_prepare(fs->tx_port, fs->tx_queue,
+ pkts_burst, nb_rx);
+ if (nb_prep != nb_rx)
+ printf("Preparing packet burst to transmit failed: %s\n",
+ rte_strerror(rte_errno));
+
+ nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst,
+ nb_prep);
+ } else {
+ nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst,
+ nb_rx);
+ }
/*
* Retry if necessary
*/
@@ -180,6 +180,11 @@ struct fwd_engine * fwd_engines[] = {
enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */
+/*
+ * Enable Tx preparation path in the "csum" engine.
+ */
+uint8_t tx_prepare;
+
uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
@@ -383,6 +383,8 @@ enum tx_pkt_split {
extern enum tx_pkt_split tx_pkt_split;
+extern uint8_t tx_prepare;
+
extern uint16_t nb_pkt_per_burst;
extern uint16_t mb_mempool_cache;
extern int8_t rx_pthresh;
@@ -750,6 +750,19 @@ Display tx checksum offload configuration::
testpmd> csum show (port_id)
+csum txprep
+~~~~~~~~~~~
+
+Select TX preparation path for the ``csum`` forwarding engine::
+
+ testpmd> csum txprep (on|off)
+
+If enabled, the csum forward engine uses the TX preparation API for full
+packet preparation and verification before the TX burst.
+
+If disabled, the csum engine initializes all required fields at the
+application level and the TX preparation stage is not executed.
+
tso set
~~~~~~~